Diffstat (limited to 'llvm')
196 files changed, 5672 insertions, 4279 deletions
diff --git a/llvm/docs/DirectX/DXContainer.rst b/llvm/docs/DirectX/DXContainer.rst index 17452d9..4473f4e 100644 --- a/llvm/docs/DirectX/DXContainer.rst +++ b/llvm/docs/DirectX/DXContainer.rst @@ -530,7 +530,7 @@ but adds a 32-bit access flag. .. code-block:: c struct DescriptorRange_V1_0 { - uint32_t RangeType; + dxil::ResourceClass RangeType; uint32_t NumDescriptors; uint32_t BaseShaderRegister; uint32_t RegisterSpace; @@ -538,12 +538,12 @@ but adds a 32-bit access flag. }; struct DescriptorRange_V1_1 { - dxbc::DescriptorRangeType RangeType; + dxil::ResourceClass RangeType; uint32_t NumDescriptors; uint32_t BaseShaderRegister; uint32_t RegisterSpace; - uint32_t OffsetInDescriptorsFromTableStart; uint32_t Flags; + uint32_t OffsetInDescriptorsFromTableStart; }; Static Samplers @@ -556,22 +556,26 @@ This section also has a variable size, since it can contain multiple static samplers definitions. However, the definition is a fixed sized struct, containing 13 32-byte fields of various enum, float, and integer values. +In version 1.2, the static sampler is 17 bytes. It matches the 1.0 static sampler +but adds a 32-bit access flag. In Version 1.1, it matches static sampler +version 1.0. + .. code-block:: c struct StaticSamplerDesc { - FilterMode Filter; - TextureAddressMode AddressU; - TextureAddressMode AddressV; - TextureAddressMode AddressW; + dxbc::FilterMode Filter; + dxbc::TextureAddressMode AddressU; + dxbc::TextureAddressMode AddressV; + dxbc::TextureAddressMode AddressW; float MipLODBias; uint32_t MaxAnisotropy; - ComparisonFunc ComparisonFunc; - StaticBorderColor BorderColor; + dxbc::ComparisonFunc ComparisonFunc; + dxbc::StaticBorderColor BorderColor; float MinLOD; float MaxLOD; uint32_t ShaderRegister; uint32_t RegisterSpace; - ShaderVisibility ShaderVisibility; + dxbc::ShaderVisibility ShaderVisibility; }; SFI0 Part diff --git a/llvm/include/llvm/Analysis/ValueTracking.h b/llvm/include/llvm/Analysis/ValueTracking.h index 15ff129..af218ba 100644 --- a/llvm/include/llvm/Analysis/ValueTracking.h +++ b/llvm/include/llvm/Analysis/ValueTracking.h @@ -613,6 +613,12 @@ LLVM_ABI bool isValidAssumeForContext(const Instruction *I, const DominatorTree *DT = nullptr, bool AllowEphemerals = false); +/// Returns true, if no instruction between \p Assume and \p CtxI may free +/// memory and the function is marked as NoSync. The latter ensures the current +/// function cannot arrange for another thread to free on its behalf. +LLVM_ABI bool willNotFreeBetween(const Instruction *Assume, + const Instruction *CtxI); + enum class OverflowResult { /// Always overflows in the direction of signed/unsigned min value. AlwaysOverflowsLow, diff --git a/llvm/include/llvm/CodeGen/TargetLowering.h b/llvm/include/llvm/CodeGen/TargetLowering.h index 7bbad17..88691b9 100644 --- a/llvm/include/llvm/CodeGen/TargetLowering.h +++ b/llvm/include/llvm/CodeGen/TargetLowering.h @@ -4654,23 +4654,6 @@ public: return false; } - /// Allows the target to handle physreg-carried dependency - /// in target-specific way. Used from the ScheduleDAGSDNodes to decide whether - /// to add the edge to the dependency graph. - /// Def - input: Selection DAG node defininfg physical register - /// User - input: Selection DAG node using physical register - /// Op - input: Number of User operand - /// PhysReg - inout: set to the physical register if the edge is - /// necessary, unchanged otherwise - /// Cost - inout: physical register copy cost. 
- /// Returns 'true' is the edge is necessary, 'false' otherwise - virtual bool checkForPhysRegDependency(SDNode *Def, SDNode *User, unsigned Op, - const TargetRegisterInfo *TRI, - const TargetInstrInfo *TII, - MCRegister &PhysReg, int &Cost) const { - return false; - } - /// Target-specific combining of register parts into its original value virtual SDValue joinRegisterPartsIntoValue(SelectionDAG &DAG, const SDLoc &DL, diff --git a/llvm/include/llvm/CodeGen/TargetRegisterInfo.h b/llvm/include/llvm/CodeGen/TargetRegisterInfo.h index bf133f0..822245f 100644 --- a/llvm/include/llvm/CodeGen/TargetRegisterInfo.h +++ b/llvm/include/llvm/CodeGen/TargetRegisterInfo.h @@ -109,10 +109,15 @@ public: return MC->contains(Reg1.asMCReg(), Reg2.asMCReg()); } - /// Return the cost of copying a value between two registers in this class. - /// A negative number means the register class is very expensive - /// to copy e.g. status flag register classes. - int getCopyCost() const { return MC->getCopyCost(); } + /// Return the cost of copying a value between two registers in this class. If + /// this is the maximum value, the register may be impossible to copy. + uint8_t getCopyCost() const { return MC->getCopyCost(); } + + /// \return true if register class is very expensive to copy e.g. status flag + /// register classes. + bool expensiveOrImpossibleToCopy() const { + return MC->getCopyCost() == std::numeric_limits<uint8_t>::max(); + } /// Return true if this register class may be used to create virtual /// registers. diff --git a/llvm/include/llvm/Frontend/OpenMP/OMP.td b/llvm/include/llvm/Frontend/OpenMP/OMP.td index 38f95a1..bba0d6e 100644 --- a/llvm/include/llvm/Frontend/OpenMP/OMP.td +++ b/llvm/include/llvm/Frontend/OpenMP/OMP.td @@ -1333,6 +1333,9 @@ def OMP_Tile : Directive<[Spelling<"tile">]> { let allowedOnceClauses = [ VersionedClause<OMPC_Sizes, 51>, ]; + let requiredClauses = [ + VersionedClause<OMPC_Sizes, 51>, + ]; let association = AS_Loop; let category = CA_Executable; } diff --git a/llvm/include/llvm/IR/PatternMatch.h b/llvm/include/llvm/IR/PatternMatch.h index 6168e24..2e31fe5 100644 --- a/llvm/include/llvm/IR/PatternMatch.h +++ b/llvm/include/llvm/IR/PatternMatch.h @@ -2773,6 +2773,14 @@ m_MaskedLoad(const Opnd0 &Op0, const Opnd1 &Op1, const Opnd2 &Op2, return m_Intrinsic<Intrinsic::masked_load>(Op0, Op1, Op2, Op3); } +/// Matches MaskedStore Intrinsic. +template <typename Opnd0, typename Opnd1, typename Opnd2, typename Opnd3> +inline typename m_Intrinsic_Ty<Opnd0, Opnd1, Opnd2, Opnd3>::Ty +m_MaskedStore(const Opnd0 &Op0, const Opnd1 &Op1, const Opnd2 &Op2, + const Opnd3 &Op3) { + return m_Intrinsic<Intrinsic::masked_store>(Op0, Op1, Op2, Op3); +} + /// Matches MaskedGather Intrinsic. template <typename Opnd0, typename Opnd1, typename Opnd2, typename Opnd3> inline typename m_Intrinsic_Ty<Opnd0, Opnd1, Opnd2, Opnd3>::Ty diff --git a/llvm/include/llvm/MC/MCRegisterInfo.h b/llvm/include/llvm/MC/MCRegisterInfo.h index aad3792..e6fc707 100644 --- a/llvm/include/llvm/MC/MCRegisterInfo.h +++ b/llvm/include/llvm/MC/MCRegisterInfo.h @@ -45,7 +45,7 @@ public: const uint16_t RegSetSize; const uint16_t ID; const uint16_t RegSizeInBits; - const int8_t CopyCost; + const uint8_t CopyCost; const bool Allocatable; const bool BaseClass; @@ -94,7 +94,7 @@ public: /// getCopyCost - Return the cost of copying a value between two registers in /// this class. A negative number means the register class is very expensive /// to copy e.g. status flag register classes. 
- int getCopyCost() const { return CopyCost; } + uint8_t getCopyCost() const { return CopyCost; } /// isAllocatable - Return true if this register class may be used to create /// virtual registers. diff --git a/llvm/include/llvm/Support/X86DisassemblerDecoderCommon.h b/llvm/include/llvm/Support/X86DisassemblerDecoderCommon.h index 1e07fbe..faaff4a 100644 --- a/llvm/include/llvm/Support/X86DisassemblerDecoderCommon.h +++ b/llvm/include/llvm/Support/X86DisassemblerDecoderCommon.h @@ -18,8 +18,7 @@ #include "llvm/Support/DataTypes.h" -namespace llvm { -namespace X86Disassembler { +namespace llvm::X86Disassembler { #define INSTRUCTIONS_SYM x86DisassemblerInstrSpecifiers #define CONTEXTS_SYM x86DisassemblerContexts @@ -541,7 +540,6 @@ static const unsigned X86_MAX_OPERANDS = 6; /// respectively. enum DisassemblerMode { MODE_16BIT, MODE_32BIT, MODE_64BIT }; -} // namespace X86Disassembler -} // namespace llvm +} // namespace llvm::X86Disassembler #endif diff --git a/llvm/include/llvm/Transforms/Scalar/GVN.h b/llvm/include/llvm/Transforms/Scalar/GVN.h index 2454149..74a4d6c 100644 --- a/llvm/include/llvm/Transforms/Scalar/GVN.h +++ b/llvm/include/llvm/Transforms/Scalar/GVN.h @@ -56,6 +56,7 @@ class OptimizationRemarkEmitter; class PHINode; class TargetLibraryInfo; class Value; +class IntrinsicInst; /// A private "module" namespace for types and utilities used by GVN. These /// are implementation details and should not be used by clients. namespace LLVM_LIBRARY_VISIBILITY_NAMESPACE gvn { @@ -349,6 +350,7 @@ private: // Helper functions of redundant load elimination. bool processLoad(LoadInst *L); + bool processMaskedLoad(IntrinsicInst *I); bool processNonLocalLoad(LoadInst *L); bool processAssumeIntrinsic(AssumeInst *II); diff --git a/llvm/lib/Analysis/HashRecognize.cpp b/llvm/lib/Analysis/HashRecognize.cpp index 5d7ee1f..4529123 100644 --- a/llvm/lib/Analysis/HashRecognize.cpp +++ b/llvm/lib/Analysis/HashRecognize.cpp @@ -97,7 +97,7 @@ static bool containsUnreachable(const Loop &L, } } } - return std::distance(Latch->begin(), Latch->end()) != Visited.size(); + return Latch->size() != Visited.size(); } /// A structure that can hold either a Simple Recurrence or a Conditional diff --git a/llvm/lib/Analysis/LoopAccessAnalysis.cpp b/llvm/lib/Analysis/LoopAccessAnalysis.cpp index 47dccde..7adb25d 100644 --- a/llvm/lib/Analysis/LoopAccessAnalysis.cpp +++ b/llvm/lib/Analysis/LoopAccessAnalysis.cpp @@ -233,19 +233,25 @@ static bool evaluatePtrAddRecAtMaxBTCWillNotWrap( const SCEV *DerefBytesSCEV = SE.getConstant(WiderTy, DerefBytes); // Check if we have a suitable dereferencable assumption we can use. 
- if (!StartPtrV->canBeFreed()) { - Instruction *CtxI = &*L->getHeader()->getFirstNonPHIIt(); - if (BasicBlock *LoopPred = L->getLoopPredecessor()) { - if (isa<BranchInst>(LoopPred->getTerminator())) - CtxI = LoopPred->getTerminator(); - } - - RetainedKnowledge DerefRK = getKnowledgeValidInContext( - StartPtrV, {Attribute::Dereferenceable}, *AC, CtxI, DT); - if (DerefRK) { - DerefBytesSCEV = - SE.getUMaxExpr(DerefBytesSCEV, SE.getSCEV(DerefRK.IRArgValue)); - } + Instruction *CtxI = &*L->getHeader()->getFirstNonPHIIt(); + if (BasicBlock *LoopPred = L->getLoopPredecessor()) { + if (isa<BranchInst>(LoopPred->getTerminator())) + CtxI = LoopPred->getTerminator(); + } + RetainedKnowledge DerefRK; + getKnowledgeForValue(StartPtrV, {Attribute::Dereferenceable}, *AC, + [&](RetainedKnowledge RK, Instruction *Assume, auto) { + if (!isValidAssumeForContext(Assume, CtxI, DT)) + return false; + if (StartPtrV->canBeFreed() && + !willNotFreeBetween(Assume, CtxI)) + return false; + DerefRK = std::max(DerefRK, RK); + return true; + }); + if (DerefRK) { + DerefBytesSCEV = + SE.getUMaxExpr(DerefBytesSCEV, SE.getSCEV(DerefRK.IRArgValue)); } if (DerefBytesSCEV->isZero()) diff --git a/llvm/lib/Analysis/ValueTracking.cpp b/llvm/lib/Analysis/ValueTracking.cpp index 09a8fbe..1eda7a7 100644 --- a/llvm/lib/Analysis/ValueTracking.cpp +++ b/llvm/lib/Analysis/ValueTracking.cpp @@ -89,6 +89,9 @@ using namespace llvm::PatternMatch; static cl::opt<unsigned> DomConditionsMaxUses("dom-conditions-max-uses", cl::Hidden, cl::init(20)); +/// Maximum number of instructions to check between assume and context +/// instruction. +static constexpr unsigned MaxInstrsToCheckForFree = 16; /// Returns the bitwidth of the given scalar or pointer type. For vector types, /// returns the element type's bitwidth. @@ -561,6 +564,29 @@ bool llvm::isValidAssumeForContext(const Instruction *Inv, return false; } +bool llvm::willNotFreeBetween(const Instruction *Assume, + const Instruction *CtxI) { + if (CtxI->getParent() != Assume->getParent() || !Assume->comesBefore(CtxI)) + return false; + // Make sure the current function cannot arrange for another thread to free on + // its behalf. + if (!CtxI->getFunction()->hasNoSync()) + return false; + + // Check if there are any calls between the assume and CtxI that may + // free memory. + for (const auto &[Idx, I] : + enumerate(make_range(Assume->getIterator(), CtxI->getIterator()))) { + // Limit number of instructions to walk. + if (Idx > MaxInstrsToCheckForFree) + return false; + if (const auto *CB = dyn_cast<CallBase>(&I)) + if (!CB->hasFnAttr(Attribute::NoFree)) + return false; + } + return true; +} + // TODO: cmpExcludesZero misses many cases where `RHS` is non-constant but // we still have enough information about `RHS` to conclude non-zero. For // example Pred=EQ, RHS=isKnownNonZero. 
cmpExcludesZero is called in loops diff --git a/llvm/lib/CodeGen/AsmPrinter/DwarfExpression.cpp b/llvm/lib/CodeGen/AsmPrinter/DwarfExpression.cpp index 1703b27..bc0bb34 100644 --- a/llvm/lib/CodeGen/AsmPrinter/DwarfExpression.cpp +++ b/llvm/lib/CodeGen/AsmPrinter/DwarfExpression.cpp @@ -618,12 +618,15 @@ bool DwarfExpression::addExpression( case dwarf::DW_OP_dup: case dwarf::DW_OP_push_object_address: case dwarf::DW_OP_over: + case dwarf::DW_OP_rot: case dwarf::DW_OP_eq: case dwarf::DW_OP_ne: case dwarf::DW_OP_gt: case dwarf::DW_OP_ge: case dwarf::DW_OP_lt: case dwarf::DW_OP_le: + case dwarf::DW_OP_neg: + case dwarf::DW_OP_abs: emitOp(OpNum); break; case dwarf::DW_OP_deref: diff --git a/llvm/lib/CodeGen/MachineRegisterInfo.cpp b/llvm/lib/CodeGen/MachineRegisterInfo.cpp index abb3f3e..ae284f3 100644 --- a/llvm/lib/CodeGen/MachineRegisterInfo.cpp +++ b/llvm/lib/CodeGen/MachineRegisterInfo.cpp @@ -83,8 +83,6 @@ constrainRegClass(MachineRegisterInfo &MRI, Register Reg, const TargetRegisterClass *MachineRegisterInfo::constrainRegClass( Register Reg, const TargetRegisterClass *RC, unsigned MinNumRegs) { - if (Reg.isPhysical()) - return nullptr; return ::constrainRegClass(*this, Reg, getRegClass(Reg), RC, MinNumRegs); } diff --git a/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp b/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp index 11bc64c..bb10cf6 100644 --- a/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp @@ -160,7 +160,7 @@ void InstrEmitter::EmitCopyFromReg(SDValue Op, bool IsClone, Register SrcReg, // If all uses are reading from the src physical register and copying the // register is either impossible or very expensive, then don't create a copy. - if (MatchReg && SrcRC->getCopyCost() < 0) { + if (MatchReg && SrcRC->expensiveOrImpossibleToCopy()) { VRBase = SrcReg; } else { // Create the reg, emit the copy. diff --git a/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp b/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp index 31e7855..4f4fb9c 100644 --- a/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp @@ -111,15 +111,11 @@ SUnit *ScheduleDAGSDNodes::Clone(SUnit *Old) { static void CheckForPhysRegDependency(SDNode *Def, SDNode *User, unsigned Op, const TargetRegisterInfo *TRI, const TargetInstrInfo *TII, - const TargetLowering &TLI, MCRegister &PhysReg, int &Cost) { if (Op != 2 || User->getOpcode() != ISD::CopyToReg) return; Register Reg = cast<RegisterSDNode>(User->getOperand(1))->getReg(); - if (TLI.checkForPhysRegDependency(Def, User, Op, TRI, TII, PhysReg, Cost)) - return; - if (Reg.isVirtual()) return; @@ -136,7 +132,7 @@ static void CheckForPhysRegDependency(SDNode *Def, SDNode *User, unsigned Op, if (PhysReg) { const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg, Def->getSimpleValueType(ResNo)); - Cost = RC->getCopyCost(); + Cost = RC->expensiveOrImpossibleToCopy() ? -1 : RC->getCopyCost(); } } @@ -490,8 +486,7 @@ void ScheduleDAGSDNodes::AddSchedEdges() { MCRegister PhysReg; int Cost = 1; // Determine if this is a physical register dependency. - const TargetLowering &TLI = DAG->getTargetLoweringInfo(); - CheckForPhysRegDependency(OpN, N, i, TRI, TII, TLI, PhysReg, Cost); + CheckForPhysRegDependency(OpN, N, i, TRI, TII, PhysReg, Cost); assert((!PhysReg || !isChain) && "Chain dependence via physreg data?"); // FIXME: See ScheduleDAGSDNodes::EmitCopyFromReg. 
For now, scheduler // emits a copy from the physical register to a virtual register unless diff --git a/llvm/lib/IR/Assumptions.cpp b/llvm/lib/IR/Assumptions.cpp index f8bbcb3..3397f0e 100644 --- a/llvm/lib/IR/Assumptions.cpp +++ b/llvm/lib/IR/Assumptions.cpp @@ -20,9 +20,8 @@ using namespace llvm; -namespace { -bool hasAssumption(const Attribute &A, - const KnownAssumptionString &AssumptionStr) { +static bool hasAssumption(const Attribute &A, + const KnownAssumptionString &AssumptionStr) { if (!A.isValid()) return false; assert(A.isStringAttribute() && "Expected a string attribute!"); @@ -33,7 +32,7 @@ bool hasAssumption(const Attribute &A, return llvm::is_contained(Strings, AssumptionStr); } -DenseSet<StringRef> getAssumptions(const Attribute &A) { +static DenseSet<StringRef> getAssumptions(const Attribute &A) { if (!A.isValid()) return DenseSet<StringRef>(); assert(A.isStringAttribute() && "Expected a string attribute!"); @@ -47,8 +46,8 @@ DenseSet<StringRef> getAssumptions(const Attribute &A) { } template <typename AttrSite> -bool addAssumptionsImpl(AttrSite &Site, - const DenseSet<StringRef> &Assumptions) { +static bool addAssumptionsImpl(AttrSite &Site, + const DenseSet<StringRef> &Assumptions) { if (Assumptions.empty()) return false; @@ -64,7 +63,6 @@ bool addAssumptionsImpl(AttrSite &Site, return true; } -} // namespace bool llvm::hasAssumption(const Function &F, const KnownAssumptionString &AssumptionStr) { diff --git a/llvm/lib/IR/DebugInfoMetadata.cpp b/llvm/lib/IR/DebugInfoMetadata.cpp index 1ededb9e7..77d044b 100644 --- a/llvm/lib/IR/DebugInfoMetadata.cpp +++ b/llvm/lib/IR/DebugInfoMetadata.cpp @@ -1768,6 +1768,7 @@ bool DIExpression::isValid() const { case dwarf::DW_OP_bregx: case dwarf::DW_OP_push_object_address: case dwarf::DW_OP_over: + case dwarf::DW_OP_rot: case dwarf::DW_OP_consts: case dwarf::DW_OP_eq: case dwarf::DW_OP_ne: @@ -1775,6 +1776,8 @@ bool DIExpression::isValid() const { case dwarf::DW_OP_ge: case dwarf::DW_OP_lt: case dwarf::DW_OP_le: + case dwarf::DW_OP_neg: + case dwarf::DW_OP_abs: break; } } diff --git a/llvm/lib/IR/DiagnosticHandler.cpp b/llvm/lib/IR/DiagnosticHandler.cpp index 683eade..eb2fe3b 100644 --- a/llvm/lib/IR/DiagnosticHandler.cpp +++ b/llvm/lib/IR/DiagnosticHandler.cpp @@ -36,6 +36,7 @@ struct PassRemarksOpt { } } }; +} // namespace static PassRemarksOpt PassRemarksPassedOptLoc; static PassRemarksOpt PassRemarksMissedOptLoc; @@ -66,7 +67,6 @@ static cl::opt<PassRemarksOpt, true, cl::parser<std::string>> "Enable optimization analysis remarks from passes whose name match " "the given regular expression"), cl::Hidden, cl::location(PassRemarksAnalysisOptLoc), cl::ValueRequired); -} bool DiagnosticHandler::isAnalysisRemarkEnabled(StringRef PassName) const { return (PassRemarksAnalysisOptLoc.Pattern && diff --git a/llvm/lib/IR/ModuleSummaryIndex.cpp b/llvm/lib/IR/ModuleSummaryIndex.cpp index d9024b0..dc55b63 100644 --- a/llvm/lib/IR/ModuleSummaryIndex.cpp +++ b/llvm/lib/IR/ModuleSummaryIndex.cpp @@ -409,7 +409,7 @@ struct Edge { GlobalValue::GUID Src; GlobalValue::GUID Dst; }; -} +} // namespace void Attributes::add(const Twine &Name, const Twine &Value, const Twine &Comment) { diff --git a/llvm/lib/IR/PassInstrumentation.cpp b/llvm/lib/IR/PassInstrumentation.cpp index 70bbe8f..52aad8f 100644 --- a/llvm/lib/IR/PassInstrumentation.cpp +++ b/llvm/lib/IR/PassInstrumentation.cpp @@ -15,7 +15,7 @@ #include "llvm/ADT/STLExtras.h" #include "llvm/IR/PassManager.h" -namespace llvm { +using namespace llvm; template struct LLVM_EXPORT_TEMPLATE 
Any::TypeId<const Module *>; template struct LLVM_EXPORT_TEMPLATE Any::TypeId<const Function *>; @@ -42,7 +42,8 @@ PassInstrumentationCallbacks::getPassNameForClassName(StringRef ClassName) { AnalysisKey PassInstrumentationAnalysis::Key; -bool isSpecialPass(StringRef PassID, const std::vector<StringRef> &Specials) { +bool llvm::isSpecialPass(StringRef PassID, + const std::vector<StringRef> &Specials) { size_t Pos = PassID.find('<'); StringRef Prefix = PassID; if (Pos != StringRef::npos) @@ -50,5 +51,3 @@ bool isSpecialPass(StringRef PassID, const std::vector<StringRef> &Specials) { return any_of(Specials, [Prefix](StringRef S) { return Prefix.ends_with(S); }); } - -} // namespace llvm diff --git a/llvm/lib/IR/ProfDataUtils.cpp b/llvm/lib/IR/ProfDataUtils.cpp index edeca97..fc2be51 100644 --- a/llvm/lib/IR/ProfDataUtils.cpp +++ b/llvm/lib/IR/ProfDataUtils.cpp @@ -24,8 +24,6 @@ using namespace llvm; -namespace { - // MD_prof nodes have the following layout // // In general: @@ -41,14 +39,15 @@ namespace { // correctly, and can change the behavior in the future if the layout changes // the minimum number of operands for MD_prof nodes with branch weights -constexpr unsigned MinBWOps = 3; +static constexpr unsigned MinBWOps = 3; // the minimum number of operands for MD_prof nodes with value profiles -constexpr unsigned MinVPOps = 5; +static constexpr unsigned MinVPOps = 5; // We may want to add support for other MD_prof types, so provide an abstraction // for checking the metadata type. -bool isTargetMD(const MDNode *ProfData, const char *Name, unsigned MinOps) { +static bool isTargetMD(const MDNode *ProfData, const char *Name, + unsigned MinOps) { // TODO: This routine may be simplified if MD_prof used an enum instead of a // string to differentiate the types of MD_prof nodes. 
if (!ProfData || !Name || MinOps < 2) @@ -101,14 +100,11 @@ static SmallVector<uint32_t> fitWeights(ArrayRef<uint64_t> Weights) { return Ret; } -} // namespace - -namespace llvm { -cl::opt<bool> ElideAllZeroBranchWeights("elide-all-zero-branch-weights", +static cl::opt<bool> ElideAllZeroBranchWeights("elide-all-zero-branch-weights", #if defined(LLVM_ENABLE_PROFCHECK) - cl::init(false) + cl::init(false) #else - cl::init(true) + cl::init(true) #endif ); const char *MDProfLabels::BranchWeights = "branch_weights"; @@ -118,21 +114,21 @@ const char *MDProfLabels::FunctionEntryCount = "function_entry_count"; const char *MDProfLabels::SyntheticFunctionEntryCount = "synthetic_function_entry_count"; const char *MDProfLabels::UnknownBranchWeightsMarker = "unknown"; -const char *LLVMLoopEstimatedTripCount = "llvm.loop.estimated_trip_count"; +const char *llvm::LLVMLoopEstimatedTripCount = "llvm.loop.estimated_trip_count"; -bool hasProfMD(const Instruction &I) { +bool llvm::hasProfMD(const Instruction &I) { return I.hasMetadata(LLVMContext::MD_prof); } -bool isBranchWeightMD(const MDNode *ProfileData) { +bool llvm::isBranchWeightMD(const MDNode *ProfileData) { return isTargetMD(ProfileData, MDProfLabels::BranchWeights, MinBWOps); } -bool isValueProfileMD(const MDNode *ProfileData) { +bool llvm::isValueProfileMD(const MDNode *ProfileData) { return isTargetMD(ProfileData, MDProfLabels::ValueProfile, MinVPOps); } -bool hasBranchWeightMD(const Instruction &I) { +bool llvm::hasBranchWeightMD(const Instruction &I) { auto *ProfileData = I.getMetadata(LLVMContext::MD_prof); return isBranchWeightMD(ProfileData); } @@ -147,16 +143,16 @@ static bool hasCountTypeMD(const Instruction &I) { return isa<CallBase>(I) && !isBranchWeightMD(ProfileData); } -bool hasValidBranchWeightMD(const Instruction &I) { +bool llvm::hasValidBranchWeightMD(const Instruction &I) { return getValidBranchWeightMDNode(I); } -bool hasBranchWeightOrigin(const Instruction &I) { +bool llvm::hasBranchWeightOrigin(const Instruction &I) { auto *ProfileData = I.getMetadata(LLVMContext::MD_prof); return hasBranchWeightOrigin(ProfileData); } -bool hasBranchWeightOrigin(const MDNode *ProfileData) { +bool llvm::hasBranchWeightOrigin(const MDNode *ProfileData) { if (!isBranchWeightMD(ProfileData)) return false; auto *ProfDataName = dyn_cast<MDString>(ProfileData->getOperand(1)); @@ -168,54 +164,54 @@ bool hasBranchWeightOrigin(const MDNode *ProfileData) { return ProfDataName != nullptr; } -unsigned getBranchWeightOffset(const MDNode *ProfileData) { +unsigned llvm::getBranchWeightOffset(const MDNode *ProfileData) { return hasBranchWeightOrigin(ProfileData) ? 
2 : 1; } -unsigned getNumBranchWeights(const MDNode &ProfileData) { +unsigned llvm::getNumBranchWeights(const MDNode &ProfileData) { return ProfileData.getNumOperands() - getBranchWeightOffset(&ProfileData); } -MDNode *getBranchWeightMDNode(const Instruction &I) { +MDNode *llvm::getBranchWeightMDNode(const Instruction &I) { auto *ProfileData = I.getMetadata(LLVMContext::MD_prof); if (!isBranchWeightMD(ProfileData)) return nullptr; return ProfileData; } -MDNode *getValidBranchWeightMDNode(const Instruction &I) { +MDNode *llvm::getValidBranchWeightMDNode(const Instruction &I) { auto *ProfileData = getBranchWeightMDNode(I); if (ProfileData && getNumBranchWeights(*ProfileData) == I.getNumSuccessors()) return ProfileData; return nullptr; } -void extractFromBranchWeightMD32(const MDNode *ProfileData, - SmallVectorImpl<uint32_t> &Weights) { +void llvm::extractFromBranchWeightMD32(const MDNode *ProfileData, + SmallVectorImpl<uint32_t> &Weights) { extractFromBranchWeightMD(ProfileData, Weights); } -void extractFromBranchWeightMD64(const MDNode *ProfileData, - SmallVectorImpl<uint64_t> &Weights) { +void llvm::extractFromBranchWeightMD64(const MDNode *ProfileData, + SmallVectorImpl<uint64_t> &Weights) { extractFromBranchWeightMD(ProfileData, Weights); } -bool extractBranchWeights(const MDNode *ProfileData, - SmallVectorImpl<uint32_t> &Weights) { +bool llvm::extractBranchWeights(const MDNode *ProfileData, + SmallVectorImpl<uint32_t> &Weights) { if (!isBranchWeightMD(ProfileData)) return false; extractFromBranchWeightMD(ProfileData, Weights); return true; } -bool extractBranchWeights(const Instruction &I, - SmallVectorImpl<uint32_t> &Weights) { +bool llvm::extractBranchWeights(const Instruction &I, + SmallVectorImpl<uint32_t> &Weights) { auto *ProfileData = I.getMetadata(LLVMContext::MD_prof); return extractBranchWeights(ProfileData, Weights); } -bool extractBranchWeights(const Instruction &I, uint64_t &TrueVal, - uint64_t &FalseVal) { +bool llvm::extractBranchWeights(const Instruction &I, uint64_t &TrueVal, + uint64_t &FalseVal) { assert((I.getOpcode() == Instruction::Br || I.getOpcode() == Instruction::Select) && "Looking for branch weights on something besides branch, select, or " @@ -234,7 +230,8 @@ bool extractBranchWeights(const Instruction &I, uint64_t &TrueVal, return true; } -bool extractProfTotalWeight(const MDNode *ProfileData, uint64_t &TotalVal) { +bool llvm::extractProfTotalWeight(const MDNode *ProfileData, + uint64_t &TotalVal) { TotalVal = 0; if (!ProfileData) return false; @@ -262,11 +259,12 @@ bool extractProfTotalWeight(const MDNode *ProfileData, uint64_t &TotalVal) { return false; } -bool extractProfTotalWeight(const Instruction &I, uint64_t &TotalVal) { +bool llvm::extractProfTotalWeight(const Instruction &I, uint64_t &TotalVal) { return extractProfTotalWeight(I.getMetadata(LLVMContext::MD_prof), TotalVal); } -void setExplicitlyUnknownBranchWeights(Instruction &I, StringRef PassName) { +void llvm::setExplicitlyUnknownBranchWeights(Instruction &I, + StringRef PassName) { MDBuilder MDB(I.getContext()); I.setMetadata( LLVMContext::MD_prof, @@ -275,14 +273,16 @@ void setExplicitlyUnknownBranchWeights(Instruction &I, StringRef PassName) { MDB.createString(PassName)})); } -void setExplicitlyUnknownBranchWeightsIfProfiled(Instruction &I, Function &F, - StringRef PassName) { +void llvm::setExplicitlyUnknownBranchWeightsIfProfiled(Instruction &I, + Function &F, + StringRef PassName) { if (std::optional<Function::ProfileCount> EC = F.getEntryCount(); EC && EC->getCount() > 0) 
setExplicitlyUnknownBranchWeights(I, PassName); } -void setExplicitlyUnknownFunctionEntryCount(Function &F, StringRef PassName) { +void llvm::setExplicitlyUnknownFunctionEntryCount(Function &F, + StringRef PassName) { MDBuilder MDB(F.getContext()); F.setMetadata( LLVMContext::MD_prof, @@ -291,21 +291,21 @@ void setExplicitlyUnknownFunctionEntryCount(Function &F, StringRef PassName) { MDB.createString(PassName)})); } -bool isExplicitlyUnknownProfileMetadata(const MDNode &MD) { +bool llvm::isExplicitlyUnknownProfileMetadata(const MDNode &MD) { if (MD.getNumOperands() != 2) return false; return MD.getOperand(0).equalsStr(MDProfLabels::UnknownBranchWeightsMarker); } -bool hasExplicitlyUnknownBranchWeights(const Instruction &I) { +bool llvm::hasExplicitlyUnknownBranchWeights(const Instruction &I) { auto *MD = I.getMetadata(LLVMContext::MD_prof); if (!MD) return false; return isExplicitlyUnknownProfileMetadata(*MD); } -void setBranchWeights(Instruction &I, ArrayRef<uint32_t> Weights, - bool IsExpected, bool ElideAllZero) { +void llvm::setBranchWeights(Instruction &I, ArrayRef<uint32_t> Weights, + bool IsExpected, bool ElideAllZero) { if ((ElideAllZeroBranchWeights && ElideAllZero) && llvm::all_of(Weights, [](uint32_t V) { return V == 0; })) { I.setMetadata(LLVMContext::MD_prof, nullptr); @@ -317,13 +317,14 @@ void setBranchWeights(Instruction &I, ArrayRef<uint32_t> Weights, I.setMetadata(LLVMContext::MD_prof, BranchWeights); } -void setFittedBranchWeights(Instruction &I, ArrayRef<uint64_t> Weights, - bool IsExpected, bool ElideAllZero) { +void llvm::setFittedBranchWeights(Instruction &I, ArrayRef<uint64_t> Weights, + bool IsExpected, bool ElideAllZero) { setBranchWeights(I, fitWeights(Weights), IsExpected, ElideAllZero); } -SmallVector<uint32_t> downscaleWeights(ArrayRef<uint64_t> Weights, - std::optional<uint64_t> KnownMaxCount) { +SmallVector<uint32_t> +llvm::downscaleWeights(ArrayRef<uint64_t> Weights, + std::optional<uint64_t> KnownMaxCount) { uint64_t MaxCount = KnownMaxCount.has_value() ? 
KnownMaxCount.value() : *llvm::max_element(Weights); assert(MaxCount > 0 && "Bad max count"); @@ -334,7 +335,7 @@ SmallVector<uint32_t> downscaleWeights(ArrayRef<uint64_t> Weights, return DownscaledWeights; } -void scaleProfData(Instruction &I, uint64_t S, uint64_t T) { +void llvm::scaleProfData(Instruction &I, uint64_t S, uint64_t T) { assert(T != 0 && "Caller should guarantee"); auto *ProfileData = I.getMetadata(LLVMContext::MD_prof); if (ProfileData == nullptr) @@ -387,5 +388,3 @@ void scaleProfData(Instruction &I, uint64_t S, uint64_t T) { } I.setMetadata(LLVMContext::MD_prof, MDNode::get(C, Vals)); } - -} // namespace llvm diff --git a/llvm/lib/IR/SafepointIRVerifier.cpp b/llvm/lib/IR/SafepointIRVerifier.cpp index e54894c..e35b5b3 100644 --- a/llvm/lib/IR/SafepointIRVerifier.cpp +++ b/llvm/lib/IR/SafepointIRVerifier.cpp @@ -196,7 +196,6 @@ protected: static void Verify(const Function &F, const DominatorTree &DT, const CFGDeadness &CD); -namespace llvm { PreservedAnalyses SafepointIRVerifierPass::run(Function &F, FunctionAnalysisManager &AM) { const auto &DT = AM.getResult<DominatorTreeAnalysis>(F); @@ -205,7 +204,6 @@ PreservedAnalyses SafepointIRVerifierPass::run(Function &F, Verify(F, DT, CD); return PreservedAnalyses::all(); } -} // namespace llvm namespace { diff --git a/llvm/lib/IR/VFABIDemangler.cpp b/llvm/lib/IR/VFABIDemangler.cpp index 2de05a5..4fcf436 100644 --- a/llvm/lib/IR/VFABIDemangler.cpp +++ b/llvm/lib/IR/VFABIDemangler.cpp @@ -20,15 +20,16 @@ using namespace llvm; #define DEBUG_TYPE "vfabi-demangler" -namespace { /// Utilities for the Vector Function ABI name parser. +namespace { /// Return types for the parser functions. enum class ParseRet { OK, // Found. None, // Not found. Error // Syntax error. }; +} // namespace /// Extracts the `<isa>` information from the mangled string, and /// sets the `ISA` accordingly. 
If successful, the <isa> token is removed @@ -372,7 +373,6 @@ getScalableECFromSignature(const FunctionType *Signature, const VFISAKind ISA, return std::nullopt; } -} // namespace // Format of the ABI name: // _ZGV<isa><mask><vlen><parameters>_<scalarname>[(<redirection>)] diff --git a/llvm/lib/IR/Value.cpp b/llvm/lib/IR/Value.cpp index a347609..b775cbb 100644 --- a/llvm/lib/IR/Value.cpp +++ b/llvm/lib/IR/Value.cpp @@ -622,6 +622,7 @@ enum PointerStripKind { PSK_InBoundsConstantIndices, PSK_InBounds }; +} // end anonymous namespace template <PointerStripKind StripKind> static void NoopCallback(const Value *) {} @@ -696,7 +697,6 @@ static const Value *stripPointerCastsAndOffsets( return V; } -} // end anonymous namespace const Value *Value::stripPointerCasts() const { return stripPointerCastsAndOffsets<PSK_ZeroIndices>(this); diff --git a/llvm/lib/Object/BuildID.cpp b/llvm/lib/Object/BuildID.cpp index 89d6bc3..d1ee597 100644 --- a/llvm/lib/Object/BuildID.cpp +++ b/llvm/lib/Object/BuildID.cpp @@ -24,6 +24,24 @@ using namespace llvm::object; namespace { template <typename ELFT> BuildIDRef getBuildID(const ELFFile<ELFT> &Obj) { + auto findBuildID = [&Obj](const auto &ShdrOrPhdr, + uint64_t Alignment) -> std::optional<BuildIDRef> { + Error Err = Error::success(); + for (auto N : Obj.notes(ShdrOrPhdr, Err)) + if (N.getType() == ELF::NT_GNU_BUILD_ID && + N.getName() == ELF::ELF_NOTE_GNU) + return N.getDesc(Alignment); + consumeError(std::move(Err)); + return std::nullopt; + }; + + auto Sections = cantFail(Obj.sections()); + for (const auto &S : Sections) { + if (S.sh_type != ELF::SHT_NOTE) + continue; + if (std::optional<BuildIDRef> ShdrRes = findBuildID(S, S.sh_addralign)) + return ShdrRes.value(); + } auto PhdrsOrErr = Obj.program_headers(); if (!PhdrsOrErr) { consumeError(PhdrsOrErr.takeError()); @@ -32,12 +50,8 @@ template <typename ELFT> BuildIDRef getBuildID(const ELFFile<ELFT> &Obj) { for (const auto &P : *PhdrsOrErr) { if (P.p_type != ELF::PT_NOTE) continue; - Error Err = Error::success(); - for (auto N : Obj.notes(P, Err)) - if (N.getType() == ELF::NT_GNU_BUILD_ID && - N.getName() == ELF::ELF_NOTE_GNU) - return N.getDesc(P.p_align); - consumeError(std::move(Err)); + if (std::optional<BuildIDRef> PhdrRes = findBuildID(P, P.p_align)) + return PhdrRes.value(); } return {}; } diff --git a/llvm/lib/TableGen/Error.cpp b/llvm/lib/TableGen/Error.cpp index de0c4c9..3ba2c6c 100644 --- a/llvm/lib/TableGen/Error.cpp +++ b/llvm/lib/TableGen/Error.cpp @@ -19,10 +19,10 @@ #include "llvm/TableGen/Record.h" #include <cstdlib> -namespace llvm { +using namespace llvm; -SourceMgr SrcMgr; -unsigned ErrorsPrinted = 0; +SourceMgr llvm::SrcMgr; +unsigned llvm::ErrorsPrinted = 0; static void PrintMessage(ArrayRef<SMLoc> Locs, SourceMgr::DiagKind Kind, const Twine &Msg) { @@ -49,118 +49,118 @@ static void PrintMessage(ArrayRef<SMLoc> Locs, SourceMgr::DiagKind Kind, // Functions to print notes. -void PrintNote(const Twine &Msg) { - WithColor::note() << Msg << "\n"; -} +void llvm::PrintNote(const Twine &Msg) { WithColor::note() << Msg << "\n"; } -void PrintNote(function_ref<void(raw_ostream &OS)> PrintMsg) { +void llvm::PrintNote(function_ref<void(raw_ostream &OS)> PrintMsg) { PrintMsg(WithColor::note()); } -void PrintNote(ArrayRef<SMLoc> NoteLoc, const Twine &Msg) { +void llvm::PrintNote(ArrayRef<SMLoc> NoteLoc, const Twine &Msg) { PrintMessage(NoteLoc, SourceMgr::DK_Note, Msg); } // Functions to print fatal notes. 
-void PrintFatalNote(const Twine &Msg) { +void llvm::PrintFatalNote(const Twine &Msg) { PrintNote(Msg); fatal_exit(); } -void PrintFatalNote(ArrayRef<SMLoc> NoteLoc, const Twine &Msg) { +void llvm::PrintFatalNote(ArrayRef<SMLoc> NoteLoc, const Twine &Msg) { PrintNote(NoteLoc, Msg); fatal_exit(); } // This method takes a Record and uses the source location // stored in it. -void PrintFatalNote(const Record *Rec, const Twine &Msg) { +void llvm::PrintFatalNote(const Record *Rec, const Twine &Msg) { PrintNote(Rec->getLoc(), Msg); fatal_exit(); } // This method takes a RecordVal and uses the source location // stored in it. -void PrintFatalNote(const RecordVal *RecVal, const Twine &Msg) { +void llvm::PrintFatalNote(const RecordVal *RecVal, const Twine &Msg) { PrintNote(RecVal->getLoc(), Msg); fatal_exit(); } // Functions to print warnings. -void PrintWarning(const Twine &Msg) { WithColor::warning() << Msg << "\n"; } +void llvm::PrintWarning(const Twine &Msg) { + WithColor::warning() << Msg << "\n"; +} -void PrintWarning(ArrayRef<SMLoc> WarningLoc, const Twine &Msg) { +void llvm::PrintWarning(ArrayRef<SMLoc> WarningLoc, const Twine &Msg) { PrintMessage(WarningLoc, SourceMgr::DK_Warning, Msg); } -void PrintWarning(const char *Loc, const Twine &Msg) { +void llvm::PrintWarning(const char *Loc, const Twine &Msg) { SrcMgr.PrintMessage(SMLoc::getFromPointer(Loc), SourceMgr::DK_Warning, Msg); } // Functions to print errors. -void PrintError(const Twine &Msg) { WithColor::error() << Msg << "\n"; } +void llvm::PrintError(const Twine &Msg) { WithColor::error() << Msg << "\n"; } -void PrintError(function_ref<void(raw_ostream &OS)> PrintMsg) { +void llvm::PrintError(function_ref<void(raw_ostream &OS)> PrintMsg) { PrintMsg(WithColor::error()); } -void PrintError(ArrayRef<SMLoc> ErrorLoc, const Twine &Msg) { +void llvm::PrintError(ArrayRef<SMLoc> ErrorLoc, const Twine &Msg) { PrintMessage(ErrorLoc, SourceMgr::DK_Error, Msg); } -void PrintError(const char *Loc, const Twine &Msg) { +void llvm::PrintError(const char *Loc, const Twine &Msg) { SrcMgr.PrintMessage(SMLoc::getFromPointer(Loc), SourceMgr::DK_Error, Msg); } // This method takes a Record and uses the source location // stored in it. -void PrintError(const Record *Rec, const Twine &Msg) { +void llvm::PrintError(const Record *Rec, const Twine &Msg) { PrintMessage(Rec->getLoc(), SourceMgr::DK_Error, Msg); } // This method takes a RecordVal and uses the source location // stored in it. -void PrintError(const RecordVal *RecVal, const Twine &Msg) { +void llvm::PrintError(const RecordVal *RecVal, const Twine &Msg) { PrintMessage(RecVal->getLoc(), SourceMgr::DK_Error, Msg); } // Functions to print fatal errors. -void PrintFatalError(const Twine &Msg) { +void llvm::PrintFatalError(const Twine &Msg) { PrintError(Msg); fatal_exit(); } -void PrintFatalError(function_ref<void(raw_ostream &OS)> PrintMsg) { +void llvm::PrintFatalError(function_ref<void(raw_ostream &OS)> PrintMsg) { PrintError(PrintMsg); fatal_exit(); } -void PrintFatalError(ArrayRef<SMLoc> ErrorLoc, const Twine &Msg) { +void llvm::PrintFatalError(ArrayRef<SMLoc> ErrorLoc, const Twine &Msg) { PrintError(ErrorLoc, Msg); fatal_exit(); } // This method takes a Record and uses the source location // stored in it. -void PrintFatalError(const Record *Rec, const Twine &Msg) { +void llvm::PrintFatalError(const Record *Rec, const Twine &Msg) { PrintError(Rec->getLoc(), Msg); fatal_exit(); } // This method takes a RecordVal and uses the source location // stored in it. 
-void PrintFatalError(const RecordVal *RecVal, const Twine &Msg) { +void llvm::PrintFatalError(const RecordVal *RecVal, const Twine &Msg) { PrintError(RecVal->getLoc(), Msg); fatal_exit(); } // Check an assertion: Obtain the condition value and be sure it is true. // If not, print a nonfatal error along with the message. -bool CheckAssert(SMLoc Loc, const Init *Condition, const Init *Message) { +bool llvm::CheckAssert(SMLoc Loc, const Init *Condition, const Init *Message) { auto *CondValue = dyn_cast_or_null<IntInit>(Condition->convertInitializerTo( IntRecTy::get(Condition->getRecordKeeper()))); if (!CondValue) { @@ -178,11 +178,9 @@ bool CheckAssert(SMLoc Loc, const Init *Condition, const Init *Message) { } // Dump a message to stderr. -void dumpMessage(SMLoc Loc, const Init *Message) { +void llvm::dumpMessage(SMLoc Loc, const Init *Message) { if (auto *MessageInit = dyn_cast<StringInit>(Message)) PrintNote(Loc, MessageInit->getValue()); else PrintError(Loc, "dump value is not of type string"); } - -} // end namespace llvm diff --git a/llvm/lib/TableGen/Main.cpp b/llvm/lib/TableGen/Main.cpp index f545706..42043f7 100644 --- a/llvm/lib/TableGen/Main.cpp +++ b/llvm/lib/TableGen/Main.cpp @@ -64,14 +64,12 @@ WriteIfChanged("write-if-changed", cl::desc("Only write output if it changed")); static cl::opt<bool> TimePhases("time-phases", cl::desc("Time phases of parser and backend")); -namespace llvm { -cl::opt<bool> EmitLongStrLiterals( +cl::opt<bool> llvm::EmitLongStrLiterals( "long-string-literals", cl::desc("when emitting large string tables, prefer string literals over " "comma-separated char literals. This can be a readability and " "compile-time performance win, but upsets some compilers"), cl::Hidden, cl::init(true)); -} // end namespace llvm static cl::opt<bool> NoWarnOnUnusedTemplateArgs( "no-warn-on-unused-template-args", diff --git a/llvm/lib/TableGen/Record.cpp b/llvm/lib/TableGen/Record.cpp index 051a896..2ea3a24 100644 --- a/llvm/lib/TableGen/Record.cpp +++ b/llvm/lib/TableGen/Record.cpp @@ -46,8 +46,7 @@ using namespace llvm; // Context //===----------------------------------------------------------------------===// -namespace llvm { -namespace detail { +namespace llvm::detail { /// This class represents the internal implementation of the RecordKeeper. /// It contains all of the contextual static state of the Record classes. It is /// kept out-of-line to simplify dependencies, and also make it easier for @@ -100,8 +99,7 @@ struct RecordKeeperImpl { void dumpAllocationStats(raw_ostream &OS) const; }; -} // namespace detail -} // namespace llvm +} // namespace llvm::detail void detail::RecordKeeperImpl::dumpAllocationStats(raw_ostream &OS) const { // Dump memory allocation related stats. diff --git a/llvm/lib/TableGen/TGParser.cpp b/llvm/lib/TableGen/TGParser.cpp index f928ded..3d31d8e 100644 --- a/llvm/lib/TableGen/TGParser.cpp +++ b/llvm/lib/TableGen/TGParser.cpp @@ -31,8 +31,6 @@ using namespace llvm; // Support Code for the Semantic Actions. 
//===----------------------------------------------------------------------===// -namespace llvm { - RecordsEntry::RecordsEntry(std::unique_ptr<Record> Rec) : Rec(std::move(Rec)) {} RecordsEntry::RecordsEntry(std::unique_ptr<ForeachLoop> Loop) : Loop(std::move(Loop)) {} @@ -41,6 +39,7 @@ RecordsEntry::RecordsEntry(std::unique_ptr<Record::AssertionInfo> Assertion) RecordsEntry::RecordsEntry(std::unique_ptr<Record::DumpInfo> Dump) : Dump(std::move(Dump)) {} +namespace llvm { struct SubClassReference { SMRange RefRange; const Record *Rec = nullptr; @@ -61,6 +60,7 @@ struct SubMultiClassReference { bool isInvalid() const { return MC == nullptr; } void dump() const; }; +} // end namespace llvm #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) LLVM_DUMP_METHOD void SubMultiClassReference::dump() const { @@ -74,8 +74,6 @@ LLVM_DUMP_METHOD void SubMultiClassReference::dump() const { } #endif -} // end namespace llvm - static bool checkBitsConcrete(Record &R, const RecordVal &RV) { const auto *BV = cast<BitsInit>(RV.getValue()); for (unsigned i = 0, e = BV->getNumBits(); i != e; ++i) { diff --git a/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp b/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp index 8d6eb91..4357264d 100644 --- a/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp +++ b/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp @@ -282,7 +282,7 @@ static cl::opt<bool> OrderFrameObjects("aarch64-order-frame-objects", static cl::opt<bool> SplitSVEObjects("aarch64-split-sve-objects", cl::desc("Split allocation of ZPR & PPR objects"), - cl::init(false), cl::Hidden); + cl::init(true), cl::Hidden); cl::opt<bool> EnableHomogeneousPrologEpilog( "homogeneous-prolog-epilog", cl::Hidden, diff --git a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td index 36c9cb6..bc6b931 100644 --- a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td +++ b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td @@ -1010,6 +1010,36 @@ let Predicates = [HasSVE_or_SME] in { defm SEL_ZPZZ : sve_int_sel_vvv<"sel", vselect>; defm SPLICE_ZPZ : sve_int_perm_splice<"splice", AArch64splice>; + + // mul x (splat -1) -> neg x + def : Pat<(nxv16i8 (AArch64mul_m1 nxv16i1:$Op1, nxv16i8:$Op2, (nxv16i8 (splat_vector (i32 -1))))), + (NEG_ZPmZ_B $Op2, $Op1, $Op2)>; + def : Pat<(nxv8i16 (AArch64mul_m1 nxv8i1:$Op1, nxv8i16:$Op2, (nxv8i16 (splat_vector (i32 -1))))), + (NEG_ZPmZ_H $Op2, $Op1, $Op2)>; + def : Pat<(nxv4i32 (AArch64mul_m1 nxv4i1:$Op1, nxv4i32:$Op2, (nxv4i32 (splat_vector (i32 -1))))), + (NEG_ZPmZ_S $Op2, $Op1, $Op2)>; + def : Pat<(nxv2i64 (AArch64mul_m1 nxv2i1:$Op1, nxv2i64:$Op2, (nxv2i64 (splat_vector (i64 -1))))), + (NEG_ZPmZ_D $Op2, $Op1, $Op2)>; + + let AddedComplexity = 5 in { + def : Pat<(nxv16i8 (AArch64mul_p nxv16i1:$Op1, nxv16i8:$Op2, (nxv16i8 (splat_vector (i32 -1))))), + (NEG_ZPmZ_B_UNDEF $Op2, $Op1, $Op2)>; + def : Pat<(nxv8i16 (AArch64mul_p nxv8i1:$Op1, nxv8i16:$Op2, (nxv8i16 (splat_vector (i32 -1))))), + (NEG_ZPmZ_H_UNDEF $Op2, $Op1, $Op2)>; + def : Pat<(nxv4i32 (AArch64mul_p nxv4i1:$Op1, nxv4i32:$Op2, (nxv4i32 (splat_vector (i32 -1))))), + (NEG_ZPmZ_S_UNDEF $Op2, $Op1, $Op2)>; + def : Pat<(nxv2i64 (AArch64mul_p nxv2i1:$Op1, nxv2i64:$Op2, (nxv2i64 (splat_vector (i64 -1))))), + (NEG_ZPmZ_D_UNDEF $Op2, $Op1, $Op2)>; + } + + def : Pat<(nxv16i8 (AArch64mul_m1 nxv16i1:$Op1, (nxv16i8 (splat_vector (i32 -1))), nxv16i8:$Op2)), + (NEG_ZPmZ_B (DUP_ZI_B -1, 0), $Op1, $Op2)>; + def : Pat<(nxv8i16 (AArch64mul_m1 nxv8i1:$Op1, (nxv8i16 (splat_vector (i32 -1))), nxv8i16:$Op2)), + (NEG_ZPmZ_H 
(DUP_ZI_H -1, 0), $Op1, $Op2)>; + def : Pat<(nxv4i32 (AArch64mul_m1 nxv4i1:$Op1, (nxv4i32 (splat_vector (i32 -1))), nxv4i32:$Op2)), + (NEG_ZPmZ_S (DUP_ZI_S -1, 0), $Op1, $Op2)>; + def : Pat<(nxv2i64 (AArch64mul_m1 nxv2i1:$Op1, (nxv2i64 (splat_vector (i64 -1))), nxv2i64:$Op2)), + (NEG_ZPmZ_D (DUP_ZI_D -1, 0), $Op1, $Op2)>; } // End HasSVE_or_SME // COMPACT - word and doubleword diff --git a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp index 8c4b4f6..50a8754 100644 --- a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp +++ b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp @@ -5632,75 +5632,94 @@ InstructionCost AArch64TTIImpl::getPartialReductionCost( TTI::PartialReductionExtendKind OpBExtend, std::optional<unsigned> BinOp, TTI::TargetCostKind CostKind) const { InstructionCost Invalid = InstructionCost::getInvalid(); - InstructionCost Cost(TTI::TCC_Basic); if (CostKind != TTI::TCK_RecipThroughput) return Invalid; - // Sub opcodes currently only occur in chained cases. - // Independent partial reduction subtractions are still costed as an add + if (VF.isFixed() && !ST->isSVEorStreamingSVEAvailable() && + (!ST->isNeonAvailable() || !ST->hasDotProd())) + return Invalid; + if ((Opcode != Instruction::Add && Opcode != Instruction::Sub) || OpAExtend == TTI::PR_None) return Invalid; + assert((BinOp || (OpBExtend == TTI::PR_None && !InputTypeB)) && + (!BinOp || (OpBExtend != TTI::PR_None && InputTypeB)) && + "Unexpected values for OpBExtend or InputTypeB"); + // We only support multiply binary operations for now, and for muls we // require the types being extended to be the same. - // NOTE: For muls AArch64 supports lowering mixed extensions to a usdot but - // only if the i8mm or sve/streaming features are available. - if (BinOp && (*BinOp != Instruction::Mul || InputTypeA != InputTypeB || - OpBExtend == TTI::PR_None || - (OpAExtend != OpBExtend && !ST->hasMatMulInt8() && - !ST->isSVEorStreamingSVEAvailable()))) + if (BinOp && (*BinOp != Instruction::Mul || InputTypeA != InputTypeB)) return Invalid; - assert((BinOp || (OpBExtend == TTI::PR_None && !InputTypeB)) && - "Unexpected values for OpBExtend or InputTypeB"); - EVT InputEVT = EVT::getEVT(InputTypeA); - EVT AccumEVT = EVT::getEVT(AccumType); + bool IsUSDot = OpBExtend != TTI::PR_None && OpAExtend != OpBExtend; + if (IsUSDot && !ST->hasMatMulInt8()) + return Invalid; + + unsigned Ratio = + AccumType->getScalarSizeInBits() / InputTypeA->getScalarSizeInBits(); + if (VF.getKnownMinValue() <= Ratio) + return Invalid; + + VectorType *InputVectorType = VectorType::get(InputTypeA, VF); + VectorType *AccumVectorType = + VectorType::get(AccumType, VF.divideCoefficientBy(Ratio)); + // We don't yet support all kinds of legalization. + auto TA = TLI->getTypeAction(AccumVectorType->getContext(), + EVT::getEVT(AccumVectorType)); + switch (TA) { + default: + return Invalid; + case TargetLowering::TypeLegal: + case TargetLowering::TypePromoteInteger: + case TargetLowering::TypeSplitVector: + break; + } + + // Check what kind of type-legalisation happens. 
+ std::pair<InstructionCost, MVT> AccumLT = + getTypeLegalizationCost(AccumVectorType); + std::pair<InstructionCost, MVT> InputLT = + getTypeLegalizationCost(InputVectorType); - unsigned VFMinValue = VF.getKnownMinValue(); + InstructionCost Cost = InputLT.first * TTI::TCC_Basic; - if (VF.isScalable()) { - if (!ST->isSVEorStreamingSVEAvailable()) - return Invalid; + // Prefer using full types by costing half-full input types as more expensive. + if (TypeSize::isKnownLT(InputVectorType->getPrimitiveSizeInBits(), + TypeSize::getScalable(128))) + // FIXME: This can be removed after the cost of the extends are folded into + // the dot-product expression in VPlan, after landing: + // https://github.com/llvm/llvm-project/pull/147302 + Cost *= 2; - // Don't accept a partial reduction if the scaled accumulator is vscale x 1, - // since we can't lower that type. - unsigned Scale = - AccumEVT.getScalarSizeInBits() / InputEVT.getScalarSizeInBits(); - if (VFMinValue == Scale) - return Invalid; + if (ST->isSVEorStreamingSVEAvailable() && !IsUSDot) { + // i16 -> i64 is natively supported for udot/sdot + if (AccumLT.second.getScalarType() == MVT::i64 && + InputLT.second.getScalarType() == MVT::i16) + return Cost; + // i8 -> i64 is supported with an extra level of extends + if (AccumLT.second.getScalarType() == MVT::i64 && + InputLT.second.getScalarType() == MVT::i8) + // FIXME: This cost should probably be a little higher, e.g. Cost + 2 + // because it requires two extra extends on the inputs. But if we'd change + // that now, a regular reduction would be cheaper because the costs of + // the extends in the IR are still counted. This can be fixed + // after https://github.com/llvm/llvm-project/pull/147302 has landed. + return Cost; } - if (VF.isFixed() && - (!ST->isNeonAvailable() || !ST->hasDotProd() || AccumEVT == MVT::i64)) - return Invalid; - if (InputEVT == MVT::i8) { - switch (VFMinValue) { - default: - return Invalid; - case 8: - if (AccumEVT == MVT::i32) - Cost *= 2; - else if (AccumEVT != MVT::i64) - return Invalid; - break; - case 16: - if (AccumEVT == MVT::i64) - Cost *= 2; - else if (AccumEVT != MVT::i32) - return Invalid; - break; - } - } else if (InputEVT == MVT::i16) { - // FIXME: Allow i32 accumulator but increase cost, as we would extend - // it to i64. - if (VFMinValue != 8 || AccumEVT != MVT::i64) - return Invalid; - } else - return Invalid; + // i8 -> i32 is natively supported for udot/sdot/usdot, both for NEON and SVE. + if (ST->isSVEorStreamingSVEAvailable() || + (AccumLT.second.isFixedLengthVector() && ST->isNeonAvailable() && + ST->hasDotProd())) { + if (AccumLT.second.getScalarType() == MVT::i32 && + InputLT.second.getScalarType() == MVT::i8) + return Cost; + } - return Cost; + // Add additional cost for the extends that would need to be inserted. 
+ return Cost + 4; } InstructionCost diff --git a/llvm/lib/Target/AMDGPU/AMDGPU.td b/llvm/lib/Target/AMDGPU/AMDGPU.td index 7003a40..9446144 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPU.td +++ b/llvm/lib/Target/AMDGPU/AMDGPU.td @@ -2126,6 +2126,8 @@ def FeatureISAVersion12_50 : FeatureSet< FeatureLdsBarrierArriveAtomic, FeatureSetPrioIncWgInst, Feature45BitNumRecordsBufferResource, + FeatureSupportsXNACK, + FeatureXNACK, ]>; def FeatureISAVersion12_51 : FeatureSet< diff --git a/llvm/lib/Target/AMDGPU/AMDGPUAttributor.cpp b/llvm/lib/Target/AMDGPU/AMDGPUAttributor.cpp index 2ba3156..9dd64e0 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPUAttributor.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPUAttributor.cpp @@ -131,10 +131,8 @@ static bool isDSAddress(const Constant *C) { return AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::REGION_ADDRESS; } -/// Returns true if the function requires the implicit argument be passed -/// regardless of the function contents. -static bool funcRequiresHostcallPtr(const Function &F) { - // Sanitizers require the hostcall buffer passed in the implicit arguments. +/// Returns true if sanitizer attributes are present on a function. +static bool hasSanitizerAttributes(const Function &F) { return F.hasFnAttribute(Attribute::SanitizeAddress) || F.hasFnAttribute(Attribute::SanitizeThread) || F.hasFnAttribute(Attribute::SanitizeMemory) || @@ -469,15 +467,21 @@ struct AAAMDAttributesFunction : public AAAMDAttributes { // If the function requires the implicit arg pointer due to sanitizers, // assume it's needed even if explicitly marked as not requiring it. - const bool NeedsHostcall = funcRequiresHostcallPtr(*F); - if (NeedsHostcall) { + // Flat scratch initialization is needed because `asan_malloc_impl` + // calls introduced later in pipeline will have flat scratch accesses. + // FIXME: FLAT_SCRATCH_INIT will not be required here if device-libs + // implementation for `asan_malloc_impl` is updated. + const bool HasSanitizerAttrs = hasSanitizerAttributes(*F); + if (HasSanitizerAttrs) { removeAssumedBits(IMPLICIT_ARG_PTR); removeAssumedBits(HOSTCALL_PTR); + removeAssumedBits(FLAT_SCRATCH_INIT); } for (auto Attr : ImplicitAttrs) { - if (NeedsHostcall && - (Attr.first == IMPLICIT_ARG_PTR || Attr.first == HOSTCALL_PTR)) + if (HasSanitizerAttrs && + (Attr.first == IMPLICIT_ARG_PTR || Attr.first == HOSTCALL_PTR || + Attr.first == FLAT_SCRATCH_INIT)) continue; if (F->hasFnAttribute(Attr.second)) diff --git a/llvm/lib/Target/AMDGPU/AMDGPULowerModuleLDSPass.cpp b/llvm/lib/Target/AMDGPU/AMDGPULowerModuleLDSPass.cpp index 6efa78e..a4ef524 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPULowerModuleLDSPass.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPULowerModuleLDSPass.cpp @@ -608,8 +608,6 @@ public: ? LDSToKernelsThatNeedToAccessItIndirectly[HybridModuleRoot] : EmptySet; - const size_t HybridModuleRootKernelsSize = HybridModuleRootKernels.size(); - for (auto &K : LDSToKernelsThatNeedToAccessItIndirectly) { // Each iteration of this loop assigns exactly one global variable to // exactly one of the implementation strategies. 
@@ -649,8 +647,7 @@ public: ModuleScopeVariables.insert(GV); } else if (K.second.size() == 1) { KernelAccessVariables.insert(GV); - } else if (K.second.size() == HybridModuleRootKernelsSize && - set_is_subset(K.second, HybridModuleRootKernels)) { + } else if (K.second == HybridModuleRootKernels) { ModuleScopeVariables.insert(GV); } else { TableLookupVariables.insert(GV); diff --git a/llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.cpp b/llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.cpp index 2d5ae29..2120bf8 100644 --- a/llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.cpp +++ b/llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.cpp @@ -2303,7 +2303,10 @@ Expected<bool> AMDGPUDisassembler::decodeCOMPUTE_PGM_RSRC1( KdStream << Indent << ".amdhsa_reserve_vcc " << 0 << '\n'; if (!hasArchitectedFlatScratch()) KdStream << Indent << ".amdhsa_reserve_flat_scratch " << 0 << '\n'; - KdStream << Indent << ".amdhsa_reserve_xnack_mask " << 0 << '\n'; + bool ReservedXnackMask = STI.hasFeature(AMDGPU::FeatureXNACK); + assert(!ReservedXnackMask || STI.hasFeature(AMDGPU::FeatureSupportsXNACK)); + KdStream << Indent << ".amdhsa_reserve_xnack_mask " << ReservedXnackMask + << '\n'; KdStream << Indent << ".amdhsa_next_free_sgpr " << NextFreeSGPR << "\n"; CHECK_RESERVED_BITS(COMPUTE_PGM_RSRC1_PRIORITY); diff --git a/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp b/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp index fed3778..82789bc 100644 --- a/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp +++ b/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp @@ -722,7 +722,8 @@ bool SIFoldOperandsImpl::updateOperand(FoldCandidate &Fold) const { return false; } - if (!MRI->constrainRegClass(New->getReg(), ConstrainRC)) { + if (New->getReg().isVirtual() && + !MRI->constrainRegClass(New->getReg(), ConstrainRC)) { LLVM_DEBUG(dbgs() << "Cannot constrain " << printReg(New->getReg(), TRI) << TRI->getRegClassName(ConstrainRC) << '\n'); return false; diff --git a/llvm/lib/Target/AMDGPU/SIFrameLowering.cpp b/llvm/lib/Target/AMDGPU/SIFrameLowering.cpp index e4b3528..0189e7b 100644 --- a/llvm/lib/Target/AMDGPU/SIFrameLowering.cpp +++ b/llvm/lib/Target/AMDGPU/SIFrameLowering.cpp @@ -306,7 +306,8 @@ class PrologEpilogSGPRSpillBuilder { buildEpilogRestore(ST, TRI, *FuncInfo, LiveUnits, MF, MBB, MI, DL, TmpVGPR, FI, FrameReg, DwordOff); - MRI.constrainRegClass(SubReg, &AMDGPU::SReg_32_XM0RegClass); + assert(SubReg.isPhysical()); + BuildMI(MBB, MI, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32), SubReg) .addReg(TmpVGPR, RegState::Kill); DwordOff += 4; diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp index f7265c5..e233457 100644 --- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp +++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp @@ -18860,31 +18860,6 @@ SITargetLowering::getTargetMMOFlags(const Instruction &I) const { return Flags; } -bool SITargetLowering::checkForPhysRegDependency( - SDNode *Def, SDNode *User, unsigned Op, const TargetRegisterInfo *TRI, - const TargetInstrInfo *TII, MCRegister &PhysReg, int &Cost) const { - if (User->getOpcode() != ISD::CopyToReg) - return false; - if (!Def->isMachineOpcode()) - return false; - MachineSDNode *MDef = dyn_cast<MachineSDNode>(Def); - if (!MDef) - return false; - - unsigned ResNo = User->getOperand(Op).getResNo(); - if (User->getOperand(Op)->getValueType(ResNo) != MVT::i1) - return false; - const MCInstrDesc &II = TII->get(MDef->getMachineOpcode()); - if (II.isCompare() && II.hasImplicitDefOfPhysReg(AMDGPU::SCC)) { - PhysReg = AMDGPU::SCC; - const 
TargetRegisterClass *RC = - TRI->getMinimalPhysRegClass(PhysReg, Def->getSimpleValueType(ResNo)); - Cost = RC->getCopyCost(); - return true; - } - return false; -} - void SITargetLowering::emitExpandAtomicAddrSpacePredicate( Instruction *AI) const { // Given: atomicrmw fadd ptr %addr, float %val ordering diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.h b/llvm/lib/Target/AMDGPU/SIISelLowering.h index a474dab..74e58f4 100644 --- a/llvm/lib/Target/AMDGPU/SIISelLowering.h +++ b/llvm/lib/Target/AMDGPU/SIISelLowering.h @@ -561,11 +561,6 @@ public: bool denormalsEnabledForType(const SelectionDAG &DAG, EVT VT) const; bool denormalsEnabledForType(LLT Ty, const MachineFunction &MF) const; - bool checkForPhysRegDependency(SDNode *Def, SDNode *User, unsigned Op, - const TargetRegisterInfo *TRI, - const TargetInstrInfo *TII, - MCRegister &PhysReg, int &Cost) const override; - bool isKnownNeverNaNForTargetNode(SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, bool SNaN = false, unsigned Depth = 0) const override; diff --git a/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp b/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp index 76bfce8..5e27b37 100644 --- a/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp +++ b/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp @@ -1013,6 +1013,15 @@ void WaitcntBrackets::updateByEvent(WaitEventType E, MachineInstr &Inst) { } } } else if (T == X_CNT) { + WaitEventType OtherEvent = E == SMEM_GROUP ? VMEM_GROUP : SMEM_GROUP; + if (PendingEvents & (1 << OtherEvent)) { + // Hardware inserts an implicit xcnt between interleaved + // SMEM and VMEM operations. So there will never be + // outstanding address translations for both SMEM and + // VMEM at the same time. + setScoreLB(T, CurrScore - 1); + PendingEvents &= ~(1 << OtherEvent); + } for (const MachineOperand &Op : Inst.all_uses()) setScoreByOperand(&Inst, Op, T, CurrScore); } else /* LGKM_CNT || EXP_CNT || VS_CNT || NUM_INST_CNTS */ { @@ -2220,6 +2229,8 @@ void SIInsertWaitcnts::updateEventWaitcntAfter(MachineInstr &Inst, // Now look at the instruction opcode. If it is a memory access // instruction, update the upper-bound of the appropriate counter's // bracket and the destination operand scores. + // For architectures with X_CNT, mark the source address operands + // with the appropriate counter values. // TODO: Use the (TSFlags & SIInstrFlags::DS_CNT) property everywhere. bool IsVMEMAccess = false; diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp index 56435a5..cda8069 100644 --- a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp +++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp @@ -2112,8 +2112,6 @@ bool SIInstrInfo::expandPostRAPseudo(MachineInstr &MI) const { case AMDGPU::SI_RESTORE_S32_FROM_VGPR: MI.setDesc(get(AMDGPU::V_READLANE_B32)); - MI.getMF()->getRegInfo().constrainRegClass(MI.getOperand(0).getReg(), - &AMDGPU::SReg_32_XM0RegClass); break; case AMDGPU::AV_MOV_B32_IMM_PSEUDO: { Register Dst = MI.getOperand(0).getReg(); @@ -8117,21 +8115,14 @@ void SIInstrInfo::moveToVALUImpl(SIInstrWorklist &Worklist, // hope for the best. if (Inst.isCopy() && DstReg.isPhysical() && RI.isVGPR(MRI, Inst.getOperand(1).getReg())) { - // TODO: Only works for 32 bit registers. 
- if (MRI.constrainRegClass(DstReg, &AMDGPU::SReg_32_XM0RegClass)) { - BuildMI(*Inst.getParent(), &Inst, Inst.getDebugLoc(), - get(AMDGPU::V_READFIRSTLANE_B32), DstReg) - .add(Inst.getOperand(1)); - } else { - Register NewDst = - MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); - BuildMI(*Inst.getParent(), &Inst, Inst.getDebugLoc(), - get(AMDGPU::V_READFIRSTLANE_B32), NewDst) - .add(Inst.getOperand(1)); - BuildMI(*Inst.getParent(), &Inst, Inst.getDebugLoc(), get(AMDGPU::COPY), - DstReg) - .addReg(NewDst); - } + Register NewDst = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); + BuildMI(*Inst.getParent(), &Inst, Inst.getDebugLoc(), + get(AMDGPU::V_READFIRSTLANE_B32), NewDst) + .add(Inst.getOperand(1)); + BuildMI(*Inst.getParent(), &Inst, Inst.getDebugLoc(), get(AMDGPU::COPY), + DstReg) + .addReg(NewDst); + Inst.eraseFromParent(); return; } diff --git a/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp b/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp index 205237f..3c2dd42 100644 --- a/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp +++ b/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp @@ -2222,8 +2222,6 @@ bool SIRegisterInfo::spillEmergencySGPR(MachineBasicBlock::iterator MI, // Don't need to write VGPR out. } - MachineRegisterInfo &MRI = MI->getMF()->getRegInfo(); - // Restore clobbered registers in the specified restore block. MI = RestoreMBB.end(); SB.setMI(&RestoreMBB, MI); @@ -2238,7 +2236,8 @@ bool SIRegisterInfo::spillEmergencySGPR(MachineBasicBlock::iterator MI, SB.NumSubRegs == 1 ? SB.SuperReg : Register(getSubReg(SB.SuperReg, SB.SplitParts[i])); - MRI.constrainRegClass(SubReg, &AMDGPU::SReg_32_XM0RegClass); + + assert(SubReg.isPhysical()); bool LastSubReg = (i + 1 == e); auto MIB = BuildMI(*SB.MBB, MI, SB.DL, SB.TII.get(AMDGPU::V_READLANE_B32), SubReg) @@ -3059,8 +3058,7 @@ bool SIRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MI, if (IsSALU && LiveSCC) { Register NewDest; if (IsCopy) { - MF->getRegInfo().constrainRegClass(ResultReg, - &AMDGPU::SReg_32_XM0RegClass); + assert(ResultReg.isPhysical()); NewDest = ResultReg; } else { NewDest = RS->scavengeRegisterBackwards(AMDGPU::SReg_32_XM0RegClass, @@ -3190,8 +3188,6 @@ bool SIRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MI, Register NewDest; if (IsCopy) { - MF->getRegInfo().constrainRegClass(ResultReg, - &AMDGPU::SReg_32_XM0RegClass); NewDest = ResultReg; } else { NewDest = RS->scavengeRegisterBackwards( diff --git a/llvm/lib/Target/Hexagon/HexagonISelLoweringHVX.cpp b/llvm/lib/Target/Hexagon/HexagonISelLoweringHVX.cpp index d0dfa47..a94e131 100644 --- a/llvm/lib/Target/Hexagon/HexagonISelLoweringHVX.cpp +++ b/llvm/lib/Target/Hexagon/HexagonISelLoweringHVX.cpp @@ -359,6 +359,8 @@ HexagonTargetLowering::initializeHVXLowering() { setCondCodeAction(ISD::SETULE, MVT::v64f16, Expand); setCondCodeAction(ISD::SETUGE, MVT::v64f16, Expand); setCondCodeAction(ISD::SETULT, MVT::v64f16, Expand); + setCondCodeAction(ISD::SETUO, MVT::v64f16, Expand); + setCondCodeAction(ISD::SETO, MVT::v64f16, Expand); setCondCodeAction(ISD::SETNE, MVT::v32f32, Expand); setCondCodeAction(ISD::SETLE, MVT::v32f32, Expand); @@ -372,6 +374,8 @@ HexagonTargetLowering::initializeHVXLowering() { setCondCodeAction(ISD::SETULE, MVT::v32f32, Expand); setCondCodeAction(ISD::SETUGE, MVT::v32f32, Expand); setCondCodeAction(ISD::SETULT, MVT::v32f32, Expand); + setCondCodeAction(ISD::SETUO, MVT::v32f32, Expand); + setCondCodeAction(ISD::SETO, MVT::v32f32, Expand); // Boolean vectors. 
@@ -449,6 +453,7 @@ HexagonTargetLowering::initializeHVXLowering() { // Include cases which are not hander earlier setOperationAction(ISD::UINT_TO_FP, MVT::v32i1, Custom); setOperationAction(ISD::UINT_TO_FP, MVT::v64i1, Custom); + setOperationAction(ISD::SINT_TO_FP, MVT::v32i1, Custom); setTargetDAGCombine({ISD::CONCAT_VECTORS, ISD::TRUNCATE, ISD::VSELECT}); } @@ -2337,7 +2342,7 @@ HexagonTargetLowering::LowerHvxFpToInt(SDValue Op, SelectionDAG &DAG) const { return ExpandHvxFpToInt(Op, DAG); } -// For vector type v32i1 uint_to_fp to v32f32: +// For vector type v32i1 uint_to_fp/sint_to_fp to v32f32: // R1 = #1, R2 holds the v32i1 param // V1 = vsplat(R1) // V2 = vsplat(R2) @@ -2464,7 +2469,7 @@ HexagonTargetLowering::LowerHvxIntToFp(SDValue Op, SelectionDAG &DAG) const { MVT IntTy = ty(Op.getOperand(0)).getVectorElementType(); MVT FpTy = ResTy.getVectorElementType(); - if (Op.getOpcode() == ISD::UINT_TO_FP) { + if (Op.getOpcode() == ISD::UINT_TO_FP || Op.getOpcode() == ISD::SINT_TO_FP) { if (ResTy == MVT::v32f32 && ty(Op.getOperand(0)) == MVT::v32i1) return LowerHvxPred32ToFp(Op, DAG); if (ResTy == MVT::v64f16 && ty(Op.getOperand(0)) == MVT::v64i1) diff --git a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp index 4cfbfca..7ddf996 100644 --- a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp +++ b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp @@ -2860,8 +2860,7 @@ static SDValue fillSubVectorFromBuildVector(BuildVectorSDNode *Node, EVT ResTy, unsigned first) { unsigned NumElts = ResTy.getVectorNumElements(); - assert(first >= 0 && - first + NumElts <= Node->getSimpleValueType(0).getVectorNumElements()); + assert(first + NumElts <= Node->getSimpleValueType(0).getVectorNumElements()); SmallVector<SDValue, 16> Ops(Node->op_begin() + first, Node->op_begin() + first + NumElts); diff --git a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp index 395d2c4..662d3f6 100644 --- a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp +++ b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp @@ -629,7 +629,7 @@ RISCVLegalizerInfo::RISCVLegalizerInfo(const RISCVSubtarget &ST) getActionDefinitionsBuilder({G_FCOS, G_FSIN, G_FTAN, G_FPOW, G_FLOG, G_FLOG2, G_FLOG10, G_FEXP, G_FEXP2, G_FEXP10, G_FACOS, G_FASIN, G_FATAN, G_FATAN2, G_FCOSH, G_FSINH, - G_FTANH}) + G_FTANH, G_FMODF}) .libcallFor({s32, s64}) .libcallFor(ST.is64Bit(), {s128}); getActionDefinitionsBuilder({G_FPOWI, G_FLDEXP}) diff --git a/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp b/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp index 7d4535a..b37b740 100644 --- a/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp +++ b/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp @@ -1560,7 +1560,7 @@ static MCRegister getRVVBaseRegister(const RISCVRegisterInfo &TRI, MCRegister BaseReg = TRI.getSubReg(Reg, RISCV::sub_vrm1_0); // If it's not a grouped vector register, it doesn't have subregister, so // the base register is just itself. 
- if (BaseReg == RISCV::NoRegister) + if (!BaseReg.isValid()) BaseReg = Reg; return BaseReg; } diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp index 50649cf..dcce2d2 100644 --- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp +++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp @@ -533,7 +533,7 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM, setOperationAction({ISD::FREM, ISD::FPOW, ISD::FPOWI, ISD::FCOS, ISD::FSIN, ISD::FSINCOS, ISD::FEXP, ISD::FEXP2, ISD::FEXP10, ISD::FLOG, ISD::FLOG2, - ISD::FLOG10, ISD::FLDEXP, ISD::FFREXP}, + ISD::FLOG10, ISD::FLDEXP, ISD::FFREXP, ISD::FMODF}, MVT::f16, Promote); // FIXME: Need to promote f16 STRICT_* to f32 libcalls, but we don't have diff --git a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp index 6a6ead2..cf8d120 100644 --- a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp +++ b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp @@ -128,7 +128,7 @@ static bool hasUndefinedPassthru(const MachineInstr &MI) { // All undefined passthrus should be $noreg: see // RISCVDAGToDAGISel::doPeepholeNoRegPassThru const MachineOperand &UseMO = MI.getOperand(UseOpIdx); - return UseMO.getReg() == RISCV::NoRegister || UseMO.isUndef(); + return !UseMO.getReg().isValid() || UseMO.isUndef(); } /// Return true if \p MI is a copy that will be lowered to one or more vmvNr.vs. @@ -1454,7 +1454,7 @@ void RISCVInsertVSETVLI::emitVSETVLIs(MachineBasicBlock &MBB) { Register Reg = VLOp.getReg(); // Erase the AVL operand from the instruction. - VLOp.setReg(RISCV::NoRegister); + VLOp.setReg(Register()); VLOp.setIsKill(false); if (LIS) { LiveInterval &LI = LIS->getInterval(Reg); @@ -1663,7 +1663,7 @@ void RISCVInsertVSETVLI::coalesceVSETVLIs(MachineBasicBlock &MBB) const { if (!MO.isReg() || !MO.getReg().isVirtual()) return; Register OldVLReg = MO.getReg(); - MO.setReg(RISCV::NoRegister); + MO.setReg(Register()); if (LIS) LIS->shrinkToUses(&LIS->getInterval(OldVLReg)); diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp index 1e6b04f8..7db4832 100644 --- a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp +++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp @@ -1364,7 +1364,7 @@ void RISCVInstrInfo::insertIndirectBranch(MachineBasicBlock &MBB, RS->scavengeRegisterBackwards(RISCV::GPRRegClass, MI.getIterator(), /*RestoreAfter=*/false, /*SpAdj=*/0, /*AllowSpill=*/false); - if (TmpGPR != RISCV::NoRegister) + if (TmpGPR.isValid()) RS->setRegUsed(TmpGPR); else { // The case when there is no scavenged register needs special handling. 
@@ -3021,7 +3021,7 @@ bool RISCVInstrInfo::verifyInstruction(const MachineInstr &MI, ErrInfo = "Invalid operand type for VL operand"; return false; } - if (Op.isReg() && Op.getReg() != RISCV::NoRegister) { + if (Op.isReg() && Op.getReg().isValid()) { const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo(); auto *RC = MRI.getRegClass(Op.getReg()); if (!RISCV::GPRRegClass.hasSubClassEq(RC)) { diff --git a/llvm/lib/Target/RISCV/RISCVMakeCompressible.cpp b/llvm/lib/Target/RISCV/RISCVMakeCompressible.cpp index f8d33ae..54569b1 100644 --- a/llvm/lib/Target/RISCV/RISCVMakeCompressible.cpp +++ b/llvm/lib/Target/RISCV/RISCVMakeCompressible.cpp @@ -259,7 +259,7 @@ static RegImmPair getRegImmPairPreventingCompression(const MachineInstr &MI) { if (isCompressibleLoad(MI) || isCompressibleStore(MI)) { const MachineOperand &MOImm = MI.getOperand(2); if (!MOImm.isImm()) - return RegImmPair(RISCV::NoRegister, 0); + return RegImmPair(Register(), 0); int64_t Offset = MOImm.getImm(); int64_t NewBaseAdjust = getBaseAdjustForCompression(Offset, Opcode); @@ -292,7 +292,7 @@ static RegImmPair getRegImmPairPreventingCompression(const MachineInstr &MI) { } } } - return RegImmPair(RISCV::NoRegister, 0); + return RegImmPair(Register(), 0); } // Check all uses after FirstMI of the given register, keeping a vector of diff --git a/llvm/lib/Target/RISCV/RISCVVectorPeephole.cpp b/llvm/lib/Target/RISCV/RISCVVectorPeephole.cpp index ffba284..fdf9a4f 100644 --- a/llvm/lib/Target/RISCV/RISCVVectorPeephole.cpp +++ b/llvm/lib/Target/RISCV/RISCVVectorPeephole.cpp @@ -382,7 +382,7 @@ bool RISCVVectorPeephole::convertAllOnesVMergeToVMv(MachineInstr &MI) const { // vmv.v.v doesn't have a mask operand, so we may be able to inflate the // register class for the destination and passthru operands e.g. VRNoV0 -> VR MRI->recomputeRegClass(MI.getOperand(0).getReg()); - if (MI.getOperand(1).getReg() != RISCV::NoRegister) + if (MI.getOperand(1).getReg().isValid()) MRI->recomputeRegClass(MI.getOperand(1).getReg()); return true; } @@ -448,7 +448,7 @@ bool RISCVVectorPeephole::convertSameMaskVMergeToVMv(MachineInstr &MI) { Register FalseReg = MI.getOperand(2).getReg(); if (TruePassthruReg != FalseReg) { // If True's passthru is undef see if we can change it to False - if (TruePassthruReg != RISCV::NoRegister || + if (TruePassthruReg.isValid() || !MRI->hasOneUse(MI.getOperand(3).getReg()) || !ensureDominates(MI.getOperand(2), *True)) return false; @@ -467,7 +467,7 @@ bool RISCVVectorPeephole::convertSameMaskVMergeToVMv(MachineInstr &MI) { // vmv.v.v doesn't have a mask operand, so we may be able to inflate the // register class for the destination and passthru operands e.g. 
VRNoV0 -> VR MRI->recomputeRegClass(MI.getOperand(0).getReg()); - if (MI.getOperand(1).getReg() != RISCV::NoRegister) + if (MI.getOperand(1).getReg().isValid()) MRI->recomputeRegClass(MI.getOperand(1).getReg()); return true; } @@ -517,7 +517,7 @@ bool RISCVVectorPeephole::convertToUnmasked(MachineInstr &MI) const { if (RISCVII::isFirstDefTiedToFirstUse(MaskedMCID)) { unsigned PassthruOpIdx = MI.getNumExplicitDefs(); if (HasPassthru) { - if (MI.getOperand(PassthruOpIdx).getReg() != RISCV::NoRegister) + if (MI.getOperand(PassthruOpIdx).getReg()) MRI->recomputeRegClass(MI.getOperand(PassthruOpIdx).getReg()); } else MI.removeOperand(PassthruOpIdx); @@ -576,7 +576,7 @@ static bool dominates(MachineBasicBlock::const_iterator A, bool RISCVVectorPeephole::ensureDominates(const MachineOperand &MO, MachineInstr &Src) const { assert(MO.getParent()->getParent() == Src.getParent()); - if (!MO.isReg() || MO.getReg() == RISCV::NoRegister) + if (!MO.isReg() || !MO.getReg().isValid()) return true; MachineInstr *Def = MRI->getVRegDef(MO.getReg()); @@ -593,7 +593,7 @@ bool RISCVVectorPeephole::ensureDominates(const MachineOperand &MO, bool RISCVVectorPeephole::foldUndefPassthruVMV_V_V(MachineInstr &MI) { if (RISCV::getRVVMCOpcode(MI.getOpcode()) != RISCV::VMV_V_V) return false; - if (MI.getOperand(1).getReg() != RISCV::NoRegister) + if (MI.getOperand(1).getReg().isValid()) return false; // If the input was a pseudo with a policy operand, we can give it a tail @@ -654,7 +654,7 @@ bool RISCVVectorPeephole::foldVMV_V_V(MachineInstr &MI) { // Src needs to have the same passthru as VMV_V_V MachineOperand &SrcPassthru = Src->getOperand(Src->getNumExplicitDefs()); - if (SrcPassthru.getReg() != RISCV::NoRegister && + if (SrcPassthru.getReg().isValid() && SrcPassthru.getReg() != Passthru.getReg()) return false; @@ -672,7 +672,7 @@ bool RISCVVectorPeephole::foldVMV_V_V(MachineInstr &MI) { if (SrcPassthru.getReg() != Passthru.getReg()) { SrcPassthru.setReg(Passthru.getReg()); // If Src is masked then its passthru needs to be in VRNoV0. - if (Passthru.getReg() != RISCV::NoRegister) + if (Passthru.getReg().isValid()) MRI->constrainRegClass( Passthru.getReg(), TII->getRegClass(Src->getDesc(), SrcPassthru.getOperandNo(), TRI)); diff --git a/llvm/lib/Target/Sparc/DelaySlotFiller.cpp b/llvm/lib/Target/Sparc/DelaySlotFiller.cpp index 6c19049..024030d 100644 --- a/llvm/lib/Target/Sparc/DelaySlotFiller.cpp +++ b/llvm/lib/Target/Sparc/DelaySlotFiller.cpp @@ -206,8 +206,8 @@ Filler::findDelayInstr(MachineBasicBlock &MBB, if (!done) --I; - // skip debug instruction - if (I->isDebugInstr()) + // Skip meta instructions. 
+ if (I->isMetaInstruction()) continue; if (I->hasUnmodeledSideEffects() || I->isInlineAsm() || I->isPosition() || diff --git a/llvm/lib/Target/X86/GISel/X86LegalizerInfo.cpp b/llvm/lib/Target/X86/GISel/X86LegalizerInfo.cpp index 143c4c4..e7709ef 100644 --- a/llvm/lib/Target/X86/GISel/X86LegalizerInfo.cpp +++ b/llvm/lib/Target/X86/GISel/X86LegalizerInfo.cpp @@ -149,6 +149,10 @@ X86LegalizerInfo::X86LegalizerInfo(const X86Subtarget &STI, }); } + getActionDefinitionsBuilder({G_UMIN, G_UMAX, G_SMIN, G_SMAX}) + .widenScalarToNextPow2(0, /*Min=*/32) + .lower(); + // integer addition/subtraction getActionDefinitionsBuilder({G_ADD, G_SUB}) .legalFor({s8, s16, s32}) diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp index 3802506..931a10b 100644 --- a/llvm/lib/Target/X86/X86ISelLowering.cpp +++ b/llvm/lib/Target/X86/X86ISelLowering.cpp @@ -13783,10 +13783,12 @@ static SDValue lowerV4I32Shuffle(const SDLoc &DL, ArrayRef<int> Mask, // so prevents folding a load into this instruction or making a copy. const int UnpackLoMask[] = {0, 0, 1, 1}; const int UnpackHiMask[] = {2, 2, 3, 3}; - if (isShuffleEquivalent(Mask, {0, 0, 1, 1}, V1, V2)) - Mask = UnpackLoMask; - else if (isShuffleEquivalent(Mask, {2, 2, 3, 3}, V1, V2)) - Mask = UnpackHiMask; + if (!isSingleElementRepeatedMask(Mask)) { + if (isShuffleEquivalent(Mask, {0, 0, 1, 1}, V1, V2)) + Mask = UnpackLoMask; + else if (isShuffleEquivalent(Mask, {2, 2, 3, 3}, V1, V2)) + Mask = UnpackHiMask; + } return DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32, V1, getV4X86ShuffleImm8ForMask(Mask, DL, DAG)); @@ -58135,6 +58137,14 @@ static SDValue combineAdd(SDNode *N, SelectionDAG &DAG, if (SDValue V = combineToHorizontalAddSub(N, DAG, Subtarget)) return V; + // Prefer VSHLI to reduce uses, X86FixupInstTunings may revert this depending + // on the scheduler model. Limit multiple users to AVX+ targets to prevent + // introducing extra register moves. + if (Op0 == Op1 && supportedVectorShiftWithImm(VT, Subtarget, ISD::SHL)) + if (Subtarget.hasAVX() || N->isOnlyUserOf(Op0.getNode())) + return getTargetVShiftByConstNode(X86ISD::VSHLI, DL, VT.getSimpleVT(), + Op0, 1, DAG); + // Canonicalize hidden LEA pattern: // Fold (add (sub (shl x, c), y), z) -> (sub (add (shl x, c), z), y) // iff c < 4 diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp index cf6d0ec..e1e24a9 100644 --- a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp +++ b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp @@ -318,18 +318,18 @@ Value *InstCombinerImpl::simplifyMaskedLoad(IntrinsicInst &II) { // * Single constant active lane -> store // * Narrow width by halfs excluding zero/undef lanes Instruction *InstCombinerImpl::simplifyMaskedStore(IntrinsicInst &II) { + Value *StorePtr = II.getArgOperand(1); + Align Alignment = cast<ConstantInt>(II.getArgOperand(2))->getAlignValue(); auto *ConstMask = dyn_cast<Constant>(II.getArgOperand(3)); if (!ConstMask) return nullptr; // If the mask is all zeros, this instruction does nothing. - if (ConstMask->isNullValue()) + if (maskIsAllZeroOrUndef(ConstMask)) return eraseInstFromFunction(II); // If the mask is all ones, this is a plain vector store of the 1st argument. 
- if (ConstMask->isAllOnesValue()) { - Value *StorePtr = II.getArgOperand(1); - Align Alignment = cast<ConstantInt>(II.getArgOperand(2))->getAlignValue(); + if (maskIsAllOneOrUndef(ConstMask)) { StoreInst *S = new StoreInst(II.getArgOperand(0), StorePtr, false, Alignment); S->copyMetadata(II); @@ -389,7 +389,7 @@ Instruction *InstCombinerImpl::simplifyMaskedScatter(IntrinsicInst &II) { return nullptr; // If the mask is all zeros, a scatter does nothing. - if (ConstMask->isNullValue()) + if (maskIsAllZeroOrUndef(ConstMask)) return eraseInstFromFunction(II); // Vector splat address -> scalar store diff --git a/llvm/lib/Transforms/Scalar/GVN.cpp b/llvm/lib/Transforms/Scalar/GVN.cpp index 26e17cc..b9b5b58 100644 --- a/llvm/lib/Transforms/Scalar/GVN.cpp +++ b/llvm/lib/Transforms/Scalar/GVN.cpp @@ -2287,6 +2287,35 @@ bool GVNPass::processLoad(LoadInst *L) { return true; } +// Attempt to process masked loads which have loaded from +// masked stores with the same mask +bool GVNPass::processMaskedLoad(IntrinsicInst *I) { + if (!MD) + return false; + MemDepResult Dep = MD->getDependency(I); + Instruction *DepInst = Dep.getInst(); + if (!DepInst || !Dep.isLocal() || !Dep.isDef()) + return false; + + Value *Mask = I->getOperand(2); + Value *Passthrough = I->getOperand(3); + Value *StoreVal; + if (!match(DepInst, m_MaskedStore(m_Value(StoreVal), m_Value(), m_Value(), + m_Specific(Mask))) || + StoreVal->getType() != I->getType()) + return false; + + // Remove the load but generate a select for the passthrough + Value *OpToForward = llvm::SelectInst::Create(Mask, StoreVal, Passthrough, "", + I->getIterator()); + + ICF->removeUsersOf(I); + I->replaceAllUsesWith(OpToForward); + salvageAndRemoveInstruction(I); + ++NumGVNLoad; + return true; +} + /// Return a pair the first field showing the value number of \p Exp and the /// second field showing whether it is a value number newly created. std::pair<uint32_t, bool> @@ -2734,6 +2763,10 @@ bool GVNPass::processInstruction(Instruction *I) { return false; } + if (match(I, m_Intrinsic<Intrinsic::masked_load>()) && + processMaskedLoad(cast<IntrinsicInst>(I))) + return true; + // For conditional branches, we can perform simple conditional propagation on // the condition value itself. 
if (BranchInst *BI = dyn_cast<BranchInst>(I)) { diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp index 7750687..cb6bfb2 100644 --- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp +++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp @@ -8694,7 +8694,7 @@ VPlanPtr LoopVectorizationPlanner::tryToBuildVPlan(VFRange &Range) { Plan->addVF(VF); if (!VPlanTransforms::tryToConvertVPInstructionsToVPRecipes( - Plan, + *Plan, [this](PHINode *P) { return Legal->getIntOrFpInductionDescriptor(P); }, diff --git a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp index f76777b..ca63bf3 100644 --- a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp +++ b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp @@ -45,13 +45,13 @@ static cl::opt<bool> EnableWideActiveLaneMask( cl::desc("Enable use of wide get active lane mask instructions")); bool VPlanTransforms::tryToConvertVPInstructionsToVPRecipes( - VPlanPtr &Plan, + VPlan &Plan, function_ref<const InductionDescriptor *(PHINode *)> GetIntOrFpInductionDescriptor, const TargetLibraryInfo &TLI) { ReversePostOrderTraversal<VPBlockDeepTraversalWrapper<VPBlockBase *>> RPOT( - Plan->getVectorLoopRegion()); + Plan.getVectorLoopRegion()); for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(RPOT)) { // Skip blocks outside region if (!VPBB->getParent()) @@ -77,11 +77,11 @@ bool VPlanTransforms::tryToConvertVPInstructionsToVPRecipes( for (VPValue *Op : PhiR->operands()) NewRecipe->addOperand(Op); } else { - VPValue *Start = Plan->getOrAddLiveIn(II->getStartValue()); + VPValue *Start = Plan.getOrAddLiveIn(II->getStartValue()); VPValue *Step = - vputils::getOrCreateVPValueForSCEVExpr(*Plan, II->getStep()); + vputils::getOrCreateVPValueForSCEVExpr(Plan, II->getStep()); NewRecipe = new VPWidenIntOrFpInductionRecipe( - Phi, Start, Step, &Plan->getVF(), *II, Ingredient.getDebugLoc()); + Phi, Start, Step, &Plan.getVF(), *II, Ingredient.getDebugLoc()); } } else { assert(isa<VPInstruction>(&Ingredient) && diff --git a/llvm/lib/Transforms/Vectorize/VPlanTransforms.h b/llvm/lib/Transforms/Vectorize/VPlanTransforms.h index 4c65cb7..2f00e51 100644 --- a/llvm/lib/Transforms/Vectorize/VPlanTransforms.h +++ b/llvm/lib/Transforms/Vectorize/VPlanTransforms.h @@ -138,7 +138,7 @@ struct VPlanTransforms { /// widen recipes. Returns false if any VPInstructions could not be converted /// to a wide recipe if needed. LLVM_ABI_FOR_TEST static bool tryToConvertVPInstructionsToVPRecipes( - VPlanPtr &Plan, + VPlan &Plan, function_ref<const InductionDescriptor *(PHINode *)> GetIntOrFpInductionDescriptor, const TargetLibraryInfo &TLI); diff --git a/llvm/runtimes/CMakeLists.txt b/llvm/runtimes/CMakeLists.txt index 6f98eae..8399292 100644 --- a/llvm/runtimes/CMakeLists.txt +++ b/llvm/runtimes/CMakeLists.txt @@ -507,14 +507,10 @@ if(build_runtimes) endif() # Forward user-provived system configuration to runtimes for requirement introspection. - # CMAKE_PREFIX_PATH is the search path for CMake packages. In order to pass through - # the command line interface, the CMake semicolon separator needs to be replaced - # with $<SEMICOLON> + # CMAKE_PREFIX_PATH is the search path for CMake packages. 
if(CMAKE_PREFIX_PATH) - string(JOIN "$<SEMICOLON>" escaped_cmake_prefix_path ${CMAKE_PREFIX_PATH}) - list(APPEND extra_cmake_args "-DCMAKE_PREFIX_PATH=${escaped_cmake_prefix_path}") + list(APPEND extra_cmake_args "-DCMAKE_PREFIX_PATH=${CMAKE_PREFIX_PATH}") endif() - # CMAKE_PROGRAM_PATH is the search path for executables such as python. if(CMAKE_PROGRAM_PATH) list(APPEND extra_cmake_args "-DCMAKE_PROGRAM_PATH=${CMAKE_PROGRAM_PATH}") diff --git a/llvm/test/Analysis/LoopAccessAnalysis/early-exit-runtime-checks.ll b/llvm/test/Analysis/LoopAccessAnalysis/early-exit-runtime-checks.ll index 6d9aa8d..05aad8a 100644 --- a/llvm/test/Analysis/LoopAccessAnalysis/early-exit-runtime-checks.ll +++ b/llvm/test/Analysis/LoopAccessAnalysis/early-exit-runtime-checks.ll @@ -770,10 +770,10 @@ define void @all_exits_dominate_latch_countable_exits_at_most_500_iterations_kno ; CHECK-NEXT: %gep.A = getelementptr inbounds i32, ptr %A, i64 %iv ; CHECK-NEXT: Grouped accesses: ; CHECK-NEXT: Group GRP0: -; CHECK-NEXT: (Low: %B High: inttoptr (i64 -1 to ptr)) +; CHECK-NEXT: (Low: %B High: (2000 + %B)) ; CHECK-NEXT: Member: {%B,+,4}<nuw><%loop.header> ; CHECK-NEXT: Group GRP1: -; CHECK-NEXT: (Low: %A High: inttoptr (i64 -1 to ptr)) +; CHECK-NEXT: (Low: %A High: (2000 + %A)) ; CHECK-NEXT: Member: {%A,+,4}<nuw><%loop.header> ; CHECK-EMPTY: ; CHECK-NEXT: Non vectorizable stores to invariant address were not found in loop. diff --git a/llvm/test/Bitcode/DW_OP_rot_neg_abs.ll b/llvm/test/Bitcode/DW_OP_rot_neg_abs.ll new file mode 100644 index 0000000..e185530 --- /dev/null +++ b/llvm/test/Bitcode/DW_OP_rot_neg_abs.ll @@ -0,0 +1,10 @@ +;; This test checks the validity of DWARF operators DW_OP_rot, DW_OP_neg, and DW_OP_abs. + +; RUN: llvm-as < %s | llvm-dis | llvm-as | llvm-dis | FileCheck %s + +; CHECK: !DIExpression(DW_OP_push_object_address, DW_OP_lit0, DW_OP_lit0, DW_OP_neg, DW_OP_abs, DW_OP_rot, DW_OP_rot, DW_OP_rot, DW_OP_plus, DW_OP_plus) + +; ModuleID = 'DW_OP_rot_neg_abs.adb' +source_filename = "/dir/DW_OP_rot_neg_abs.ll" + +!named = !{!DIExpression(DW_OP_push_object_address, DW_OP_lit0, DW_OP_lit0, DW_OP_neg, DW_OP_abs, DW_OP_rot, DW_OP_rot, DW_OP_rot, DW_OP_plus, DW_OP_plus)} diff --git a/llvm/test/CodeGen/AArch64/ssve-stack-hazard-remarks.ll b/llvm/test/CodeGen/AArch64/ssve-stack-hazard-remarks.ll index 1de8d0a..01e3d3a 100644 --- a/llvm/test/CodeGen/AArch64/ssve-stack-hazard-remarks.ll +++ b/llvm/test/CodeGen/AArch64/ssve-stack-hazard-remarks.ll @@ -68,13 +68,12 @@ entry: } ; SVE calling conventions -; Predicate register spills end up in FP region, currently. This can be -; mitigated with the -aarch64-enable-zpr-predicate-spills option. +; Padding is placed between predicate and fpr/zpr register spills, so only emit remarks when hazard padding is off. +; Note: The -aarch64-enable-zpr-predicate-spills option is deprecated (and will be removed soon). 
define i32 @svecc_call(<4 x i16> %P0, ptr %P1, i32 %P2, <vscale x 16 x i8> %P3, i16 %P4) #2 { ; CHECK: remark: <unknown>:0:0: stack hazard in 'svecc_call': PPR stack object at [SP-64-258 * vscale] is too close to FPR stack object at [SP-64-256 * vscale] ; CHECK: remark: <unknown>:0:0: stack hazard in 'svecc_call': FPR stack object at [SP-64-16 * vscale] is too close to GPR stack object at [SP-64] -; CHECK-PADDING: remark: <unknown>:0:0: stack hazard in 'svecc_call': PPR stack object at [SP-1088-258 * vscale] is too close to FPR stack object at [SP-1088-256 * vscale] ; CHECK-PADDING-NOT: remark: <unknown>:0:0: stack hazard in 'svecc_call': ; CHECK-ZPR-PRED-SPILLS-NOT: <unknown>:0:0: stack hazard in 'svecc_call': PPR stack object at {{.*}} is too close to FPR stack object ; CHECK-ZPR-PRED-SPILLS: <unknown>:0:0: stack hazard in 'svecc_call': FPR stack object at [SP-64-16 * vscale] is too close to GPR stack object at [SP-64] @@ -89,7 +88,6 @@ entry: define i32 @svecc_alloca_call(<4 x i16> %P0, ptr %P1, i32 %P2, <vscale x 16 x i8> %P3, i16 %P4) #2 { ; CHECK: remark: <unknown>:0:0: stack hazard in 'svecc_alloca_call': PPR stack object at [SP-64-258 * vscale] is too close to FPR stack object at [SP-64-256 * vscale] ; CHECK: remark: <unknown>:0:0: stack hazard in 'svecc_alloca_call': FPR stack object at [SP-64-16 * vscale] is too close to GPR stack object at [SP-64] -; CHECK-PADDING: remark: <unknown>:0:0: stack hazard in 'svecc_alloca_call': PPR stack object at [SP-1088-258 * vscale] is too close to FPR stack object at [SP-1088-256 * vscale] ; CHECK-PADDING-NOT: remark: <unknown>:0:0: stack hazard in 'svecc_alloca_call': ; CHECK-ZPR-PRED-SPILLS-NOT: <unknown>:0:0: stack hazard in 'svecc_call': PPR stack object at {{.*}} is too close to FPR stack object ; CHECK-ZPR-PRED-SPILLS: <unknown>:0:0: stack hazard in 'svecc_alloca_call': FPR stack object at [SP-64-16 * vscale] is too close to GPR stack object at [SP-64] diff --git a/llvm/test/CodeGen/AArch64/stack-hazard.ll b/llvm/test/CodeGen/AArch64/stack-hazard.ll index 333a8be..bdee359 100644 --- a/llvm/test/CodeGen/AArch64/stack-hazard.ll +++ b/llvm/test/CodeGen/AArch64/stack-hazard.ll @@ -1,8 +1,8 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 ; RUN: llc < %s -mtriple=aarch64 -mattr=+sve2 -aarch64-stack-hazard-size=0 | FileCheck %s --check-prefixes=CHECK,CHECK0 ; RUN: llc < %s -mtriple=aarch64 -mattr=+sve2 -aarch64-stack-hazard-size=64 | FileCheck %s --check-prefixes=CHECK,CHECK64 -; RUN: llc < %s -mtriple=aarch64 -mattr=+sve2 -aarch64-stack-hazard-size=1024 | FileCheck %s --check-prefixes=CHECK,CHECK1024,CHECK1024-NOSPLITSVE -; RUN: llc < %s -mtriple=aarch64 -mattr=+sve2 -aarch64-split-sve-objects -aarch64-stack-hazard-size=1024 | FileCheck %s --check-prefixes=CHECK,CHECK1024,CHECK1024-SPLITSVE +; RUN: llc < %s -mtriple=aarch64 -mattr=+sve2 -aarch64-split-sve-objects=false -aarch64-stack-hazard-size=1024 | FileCheck %s --check-prefixes=CHECK,CHECK1024,CHECK1024-NOSPLITSVE +; RUN: llc < %s -mtriple=aarch64 -mattr=+sve2 -aarch64-stack-hazard-size=1024 | FileCheck %s --check-prefixes=CHECK,CHECK1024,CHECK1024-SPLITSVE define i32 @basic(i32 noundef %num) { ; CHECK-LABEL: basic: @@ -1940,23 +1940,22 @@ define i32 @svecc_call(<4 x i16> %P0, ptr %P1, i32 %P2, <vscale x 16 x i8> %P3, ; ; CHECK64-LABEL: svecc_call: ; CHECK64: // %bb.0: // %entry -; CHECK64-NEXT: sub sp, sp, #128 -; CHECK64-NEXT: .cfi_def_cfa_offset 128 +; CHECK64-NEXT: stp x29, x30, [sp, #-64]! 
// 16-byte Folded Spill +; CHECK64-NEXT: .cfi_def_cfa_offset 64 ; CHECK64-NEXT: cntd x9 -; CHECK64-NEXT: stp x29, x30, [sp, #64] // 16-byte Folded Spill -; CHECK64-NEXT: stp x9, x28, [sp, #80] // 16-byte Folded Spill -; CHECK64-NEXT: stp x27, x26, [sp, #96] // 16-byte Folded Spill -; CHECK64-NEXT: str x19, [sp, #112] // 8-byte Folded Spill -; CHECK64-NEXT: add x29, sp, #64 +; CHECK64-NEXT: stp x28, x27, [sp, #32] // 16-byte Folded Spill +; CHECK64-NEXT: str x9, [sp, #16] // 8-byte Folded Spill +; CHECK64-NEXT: stp x26, x19, [sp, #48] // 16-byte Folded Spill +; CHECK64-NEXT: mov x29, sp ; CHECK64-NEXT: .cfi_def_cfa w29, 64 -; CHECK64-NEXT: .cfi_offset w19, -16 -; CHECK64-NEXT: .cfi_offset w26, -24 -; CHECK64-NEXT: .cfi_offset w27, -32 -; CHECK64-NEXT: .cfi_offset w28, -40 +; CHECK64-NEXT: .cfi_offset w19, -8 +; CHECK64-NEXT: .cfi_offset w26, -16 +; CHECK64-NEXT: .cfi_offset w27, -24 +; CHECK64-NEXT: .cfi_offset w28, -32 ; CHECK64-NEXT: .cfi_offset vg, -48 ; CHECK64-NEXT: .cfi_offset w30, -56 ; CHECK64-NEXT: .cfi_offset w29, -64 -; CHECK64-NEXT: addvl sp, sp, #-18 +; CHECK64-NEXT: addvl sp, sp, #-2 ; CHECK64-NEXT: str p15, [sp, #4, mul vl] // 2-byte Folded Spill ; CHECK64-NEXT: str p14, [sp, #5, mul vl] // 2-byte Folded Spill ; CHECK64-NEXT: str p13, [sp, #6, mul vl] // 2-byte Folded Spill @@ -1969,30 +1968,32 @@ define i32 @svecc_call(<4 x i16> %P0, ptr %P1, i32 %P2, <vscale x 16 x i8> %P3, ; CHECK64-NEXT: str p6, [sp, #13, mul vl] // 2-byte Folded Spill ; CHECK64-NEXT: str p5, [sp, #14, mul vl] // 2-byte Folded Spill ; CHECK64-NEXT: str p4, [sp, #15, mul vl] // 2-byte Folded Spill -; CHECK64-NEXT: str z23, [sp, #2, mul vl] // 16-byte Folded Spill -; CHECK64-NEXT: str z22, [sp, #3, mul vl] // 16-byte Folded Spill -; CHECK64-NEXT: str z21, [sp, #4, mul vl] // 16-byte Folded Spill -; CHECK64-NEXT: str z20, [sp, #5, mul vl] // 16-byte Folded Spill -; CHECK64-NEXT: str z19, [sp, #6, mul vl] // 16-byte Folded Spill -; CHECK64-NEXT: str z18, [sp, #7, mul vl] // 16-byte Folded Spill -; CHECK64-NEXT: str z17, [sp, #8, mul vl] // 16-byte Folded Spill -; CHECK64-NEXT: str z16, [sp, #9, mul vl] // 16-byte Folded Spill -; CHECK64-NEXT: str z15, [sp, #10, mul vl] // 16-byte Folded Spill -; CHECK64-NEXT: str z14, [sp, #11, mul vl] // 16-byte Folded Spill -; CHECK64-NEXT: str z13, [sp, #12, mul vl] // 16-byte Folded Spill -; CHECK64-NEXT: str z12, [sp, #13, mul vl] // 16-byte Folded Spill -; CHECK64-NEXT: str z11, [sp, #14, mul vl] // 16-byte Folded Spill -; CHECK64-NEXT: str z10, [sp, #15, mul vl] // 16-byte Folded Spill -; CHECK64-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill -; CHECK64-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill -; CHECK64-NEXT: .cfi_escape 0x10, 0x48, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x78, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d8 @ cfa - 8 * IncomingVG - 128 -; CHECK64-NEXT: .cfi_escape 0x10, 0x49, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x70, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d9 @ cfa - 16 * IncomingVG - 128 -; CHECK64-NEXT: .cfi_escape 0x10, 0x4a, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x68, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d10 @ cfa - 24 * IncomingVG - 128 -; CHECK64-NEXT: .cfi_escape 0x10, 0x4b, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x60, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d11 @ cfa - 32 * IncomingVG - 128 -; CHECK64-NEXT: .cfi_escape 0x10, 0x4c, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x58, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d12 @ cfa - 40 * IncomingVG - 128 -; CHECK64-NEXT: .cfi_escape 0x10, 0x4d, 0x0d, 0x12, 0x11, 
0x50, 0x22, 0x06, 0x11, 0x50, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d13 @ cfa - 48 * IncomingVG - 128 -; CHECK64-NEXT: .cfi_escape 0x10, 0x4e, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x48, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d14 @ cfa - 56 * IncomingVG - 128 -; CHECK64-NEXT: .cfi_escape 0x10, 0x4f, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x40, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d15 @ cfa - 64 * IncomingVG - 128 +; CHECK64-NEXT: sub sp, sp, #64 +; CHECK64-NEXT: addvl sp, sp, #-16 +; CHECK64-NEXT: str z23, [sp] // 16-byte Folded Spill +; CHECK64-NEXT: str z22, [sp, #1, mul vl] // 16-byte Folded Spill +; CHECK64-NEXT: str z21, [sp, #2, mul vl] // 16-byte Folded Spill +; CHECK64-NEXT: str z20, [sp, #3, mul vl] // 16-byte Folded Spill +; CHECK64-NEXT: str z19, [sp, #4, mul vl] // 16-byte Folded Spill +; CHECK64-NEXT: str z18, [sp, #5, mul vl] // 16-byte Folded Spill +; CHECK64-NEXT: str z17, [sp, #6, mul vl] // 16-byte Folded Spill +; CHECK64-NEXT: str z16, [sp, #7, mul vl] // 16-byte Folded Spill +; CHECK64-NEXT: str z15, [sp, #8, mul vl] // 16-byte Folded Spill +; CHECK64-NEXT: str z14, [sp, #9, mul vl] // 16-byte Folded Spill +; CHECK64-NEXT: str z13, [sp, #10, mul vl] // 16-byte Folded Spill +; CHECK64-NEXT: str z12, [sp, #11, mul vl] // 16-byte Folded Spill +; CHECK64-NEXT: str z11, [sp, #12, mul vl] // 16-byte Folded Spill +; CHECK64-NEXT: str z10, [sp, #13, mul vl] // 16-byte Folded Spill +; CHECK64-NEXT: str z9, [sp, #14, mul vl] // 16-byte Folded Spill +; CHECK64-NEXT: str z8, [sp, #15, mul vl] // 16-byte Folded Spill +; CHECK64-NEXT: .cfi_escape 0x10, 0x48, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x68, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d8 @ cfa - 24 * IncomingVG - 128 +; CHECK64-NEXT: .cfi_escape 0x10, 0x49, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x60, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d9 @ cfa - 32 * IncomingVG - 128 +; CHECK64-NEXT: .cfi_escape 0x10, 0x4a, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x58, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d10 @ cfa - 40 * IncomingVG - 128 +; CHECK64-NEXT: .cfi_escape 0x10, 0x4b, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x50, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d11 @ cfa - 48 * IncomingVG - 128 +; CHECK64-NEXT: .cfi_escape 0x10, 0x4c, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x48, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d12 @ cfa - 56 * IncomingVG - 128 +; CHECK64-NEXT: .cfi_escape 0x10, 0x4d, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x40, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d13 @ cfa - 64 * IncomingVG - 128 +; CHECK64-NEXT: .cfi_escape 0x10, 0x4e, 0x0e, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0xb8, 0x7f, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d14 @ cfa - 72 * IncomingVG - 128 +; CHECK64-NEXT: .cfi_escape 0x10, 0x4f, 0x0e, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0xb0, 0x7f, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d15 @ cfa - 80 * IncomingVG - 128 ; CHECK64-NEXT: sub sp, sp, #64 ; CHECK64-NEXT: mov x8, x0 ; CHECK64-NEXT: bl __arm_sme_state @@ -2014,22 +2015,32 @@ define i32 @svecc_call(<4 x i16> %P0, ptr %P1, i32 %P2, <vscale x 16 x i8> %P3, ; CHECK64-NEXT: mov w0, #22647 // =0x5877 ; CHECK64-NEXT: movk w0, #59491, lsl #16 ; CHECK64-NEXT: add sp, sp, #64 -; CHECK64-NEXT: ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload -; CHECK64-NEXT: ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload -; CHECK64-NEXT: ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload -; CHECK64-NEXT: ldr z20, [sp, #5, mul vl] // 16-byte Folded Reload -; CHECK64-NEXT: ldr z19, [sp, #6, mul vl] // 16-byte Folded Reload -; CHECK64-NEXT: ldr z18, [sp, #7, 
mul vl] // 16-byte Folded Reload -; CHECK64-NEXT: ldr z17, [sp, #8, mul vl] // 16-byte Folded Reload -; CHECK64-NEXT: ldr z16, [sp, #9, mul vl] // 16-byte Folded Reload -; CHECK64-NEXT: ldr z15, [sp, #10, mul vl] // 16-byte Folded Reload -; CHECK64-NEXT: ldr z14, [sp, #11, mul vl] // 16-byte Folded Reload -; CHECK64-NEXT: ldr z13, [sp, #12, mul vl] // 16-byte Folded Reload -; CHECK64-NEXT: ldr z12, [sp, #13, mul vl] // 16-byte Folded Reload -; CHECK64-NEXT: ldr z11, [sp, #14, mul vl] // 16-byte Folded Reload -; CHECK64-NEXT: ldr z10, [sp, #15, mul vl] // 16-byte Folded Reload -; CHECK64-NEXT: ldr z9, [sp, #16, mul vl] // 16-byte Folded Reload -; CHECK64-NEXT: ldr z8, [sp, #17, mul vl] // 16-byte Folded Reload +; CHECK64-NEXT: ldr z23, [sp] // 16-byte Folded Reload +; CHECK64-NEXT: ldr z22, [sp, #1, mul vl] // 16-byte Folded Reload +; CHECK64-NEXT: ldr z21, [sp, #2, mul vl] // 16-byte Folded Reload +; CHECK64-NEXT: ldr z20, [sp, #3, mul vl] // 16-byte Folded Reload +; CHECK64-NEXT: ldr z19, [sp, #4, mul vl] // 16-byte Folded Reload +; CHECK64-NEXT: ldr z18, [sp, #5, mul vl] // 16-byte Folded Reload +; CHECK64-NEXT: ldr z17, [sp, #6, mul vl] // 16-byte Folded Reload +; CHECK64-NEXT: ldr z16, [sp, #7, mul vl] // 16-byte Folded Reload +; CHECK64-NEXT: ldr z15, [sp, #8, mul vl] // 16-byte Folded Reload +; CHECK64-NEXT: ldr z14, [sp, #9, mul vl] // 16-byte Folded Reload +; CHECK64-NEXT: ldr z13, [sp, #10, mul vl] // 16-byte Folded Reload +; CHECK64-NEXT: ldr z12, [sp, #11, mul vl] // 16-byte Folded Reload +; CHECK64-NEXT: ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload +; CHECK64-NEXT: ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload +; CHECK64-NEXT: ldr z9, [sp, #14, mul vl] // 16-byte Folded Reload +; CHECK64-NEXT: ldr z8, [sp, #15, mul vl] // 16-byte Folded Reload +; CHECK64-NEXT: add sp, sp, #64 +; CHECK64-NEXT: addvl sp, sp, #16 +; CHECK64-NEXT: .cfi_restore z8 +; CHECK64-NEXT: .cfi_restore z9 +; CHECK64-NEXT: .cfi_restore z10 +; CHECK64-NEXT: .cfi_restore z11 +; CHECK64-NEXT: .cfi_restore z12 +; CHECK64-NEXT: .cfi_restore z13 +; CHECK64-NEXT: .cfi_restore z14 +; CHECK64-NEXT: .cfi_restore z15 ; CHECK64-NEXT: ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload ; CHECK64-NEXT: ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload ; CHECK64-NEXT: ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload @@ -2042,20 +2053,11 @@ define i32 @svecc_call(<4 x i16> %P0, ptr %P1, i32 %P2, <vscale x 16 x i8> %P3, ; CHECK64-NEXT: ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload ; CHECK64-NEXT: ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload ; CHECK64-NEXT: ldr p4, [sp, #15, mul vl] // 2-byte Folded Reload -; CHECK64-NEXT: addvl sp, sp, #18 -; CHECK64-NEXT: .cfi_restore z8 -; CHECK64-NEXT: .cfi_restore z9 -; CHECK64-NEXT: .cfi_restore z10 -; CHECK64-NEXT: .cfi_restore z11 -; CHECK64-NEXT: .cfi_restore z12 -; CHECK64-NEXT: .cfi_restore z13 -; CHECK64-NEXT: .cfi_restore z14 -; CHECK64-NEXT: .cfi_restore z15 -; CHECK64-NEXT: .cfi_def_cfa wsp, 128 -; CHECK64-NEXT: ldp x26, x19, [sp, #104] // 16-byte Folded Reload -; CHECK64-NEXT: ldp x28, x27, [sp, #88] // 16-byte Folded Reload -; CHECK64-NEXT: ldp x29, x30, [sp, #64] // 16-byte Folded Reload -; CHECK64-NEXT: add sp, sp, #128 +; CHECK64-NEXT: addvl sp, sp, #2 +; CHECK64-NEXT: .cfi_def_cfa wsp, 64 +; CHECK64-NEXT: ldp x26, x19, [sp, #48] // 16-byte Folded Reload +; CHECK64-NEXT: ldp x28, x27, [sp, #32] // 16-byte Folded Reload +; CHECK64-NEXT: ldp x29, x30, [sp], #64 // 16-byte Folded Reload ; CHECK64-NEXT: .cfi_def_cfa_offset 0 ; CHECK64-NEXT: .cfi_restore w19 
; CHECK64-NEXT: .cfi_restore w26 @@ -2463,23 +2465,22 @@ define i32 @svecc_alloca_call(<4 x i16> %P0, ptr %P1, i32 %P2, <vscale x 16 x i8 ; ; CHECK64-LABEL: svecc_alloca_call: ; CHECK64: // %bb.0: // %entry -; CHECK64-NEXT: sub sp, sp, #128 -; CHECK64-NEXT: .cfi_def_cfa_offset 128 +; CHECK64-NEXT: stp x29, x30, [sp, #-64]! // 16-byte Folded Spill +; CHECK64-NEXT: .cfi_def_cfa_offset 64 ; CHECK64-NEXT: cntd x9 -; CHECK64-NEXT: stp x29, x30, [sp, #64] // 16-byte Folded Spill -; CHECK64-NEXT: stp x9, x28, [sp, #80] // 16-byte Folded Spill -; CHECK64-NEXT: stp x27, x26, [sp, #96] // 16-byte Folded Spill -; CHECK64-NEXT: str x19, [sp, #112] // 8-byte Folded Spill -; CHECK64-NEXT: add x29, sp, #64 +; CHECK64-NEXT: stp x28, x27, [sp, #32] // 16-byte Folded Spill +; CHECK64-NEXT: str x9, [sp, #16] // 8-byte Folded Spill +; CHECK64-NEXT: stp x26, x19, [sp, #48] // 16-byte Folded Spill +; CHECK64-NEXT: mov x29, sp ; CHECK64-NEXT: .cfi_def_cfa w29, 64 -; CHECK64-NEXT: .cfi_offset w19, -16 -; CHECK64-NEXT: .cfi_offset w26, -24 -; CHECK64-NEXT: .cfi_offset w27, -32 -; CHECK64-NEXT: .cfi_offset w28, -40 +; CHECK64-NEXT: .cfi_offset w19, -8 +; CHECK64-NEXT: .cfi_offset w26, -16 +; CHECK64-NEXT: .cfi_offset w27, -24 +; CHECK64-NEXT: .cfi_offset w28, -32 ; CHECK64-NEXT: .cfi_offset vg, -48 ; CHECK64-NEXT: .cfi_offset w30, -56 ; CHECK64-NEXT: .cfi_offset w29, -64 -; CHECK64-NEXT: addvl sp, sp, #-18 +; CHECK64-NEXT: addvl sp, sp, #-2 ; CHECK64-NEXT: str p15, [sp, #4, mul vl] // 2-byte Folded Spill ; CHECK64-NEXT: str p14, [sp, #5, mul vl] // 2-byte Folded Spill ; CHECK64-NEXT: str p13, [sp, #6, mul vl] // 2-byte Folded Spill @@ -2492,30 +2493,32 @@ define i32 @svecc_alloca_call(<4 x i16> %P0, ptr %P1, i32 %P2, <vscale x 16 x i8 ; CHECK64-NEXT: str p6, [sp, #13, mul vl] // 2-byte Folded Spill ; CHECK64-NEXT: str p5, [sp, #14, mul vl] // 2-byte Folded Spill ; CHECK64-NEXT: str p4, [sp, #15, mul vl] // 2-byte Folded Spill -; CHECK64-NEXT: str z23, [sp, #2, mul vl] // 16-byte Folded Spill -; CHECK64-NEXT: str z22, [sp, #3, mul vl] // 16-byte Folded Spill -; CHECK64-NEXT: str z21, [sp, #4, mul vl] // 16-byte Folded Spill -; CHECK64-NEXT: str z20, [sp, #5, mul vl] // 16-byte Folded Spill -; CHECK64-NEXT: str z19, [sp, #6, mul vl] // 16-byte Folded Spill -; CHECK64-NEXT: str z18, [sp, #7, mul vl] // 16-byte Folded Spill -; CHECK64-NEXT: str z17, [sp, #8, mul vl] // 16-byte Folded Spill -; CHECK64-NEXT: str z16, [sp, #9, mul vl] // 16-byte Folded Spill -; CHECK64-NEXT: str z15, [sp, #10, mul vl] // 16-byte Folded Spill -; CHECK64-NEXT: str z14, [sp, #11, mul vl] // 16-byte Folded Spill -; CHECK64-NEXT: str z13, [sp, #12, mul vl] // 16-byte Folded Spill -; CHECK64-NEXT: str z12, [sp, #13, mul vl] // 16-byte Folded Spill -; CHECK64-NEXT: str z11, [sp, #14, mul vl] // 16-byte Folded Spill -; CHECK64-NEXT: str z10, [sp, #15, mul vl] // 16-byte Folded Spill -; CHECK64-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill -; CHECK64-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill -; CHECK64-NEXT: .cfi_escape 0x10, 0x48, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x78, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d8 @ cfa - 8 * IncomingVG - 128 -; CHECK64-NEXT: .cfi_escape 0x10, 0x49, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x70, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d9 @ cfa - 16 * IncomingVG - 128 -; CHECK64-NEXT: .cfi_escape 0x10, 0x4a, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x68, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d10 @ cfa - 24 * IncomingVG - 128 -; CHECK64-NEXT: .cfi_escape 0x10, 0x4b, 0x0d, 0x12, 0x11, 
0x50, 0x22, 0x06, 0x11, 0x60, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d11 @ cfa - 32 * IncomingVG - 128 -; CHECK64-NEXT: .cfi_escape 0x10, 0x4c, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x58, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d12 @ cfa - 40 * IncomingVG - 128 -; CHECK64-NEXT: .cfi_escape 0x10, 0x4d, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x50, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d13 @ cfa - 48 * IncomingVG - 128 -; CHECK64-NEXT: .cfi_escape 0x10, 0x4e, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x48, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d14 @ cfa - 56 * IncomingVG - 128 -; CHECK64-NEXT: .cfi_escape 0x10, 0x4f, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x40, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d15 @ cfa - 64 * IncomingVG - 128 +; CHECK64-NEXT: sub sp, sp, #64 +; CHECK64-NEXT: addvl sp, sp, #-16 +; CHECK64-NEXT: str z23, [sp] // 16-byte Folded Spill +; CHECK64-NEXT: str z22, [sp, #1, mul vl] // 16-byte Folded Spill +; CHECK64-NEXT: str z21, [sp, #2, mul vl] // 16-byte Folded Spill +; CHECK64-NEXT: str z20, [sp, #3, mul vl] // 16-byte Folded Spill +; CHECK64-NEXT: str z19, [sp, #4, mul vl] // 16-byte Folded Spill +; CHECK64-NEXT: str z18, [sp, #5, mul vl] // 16-byte Folded Spill +; CHECK64-NEXT: str z17, [sp, #6, mul vl] // 16-byte Folded Spill +; CHECK64-NEXT: str z16, [sp, #7, mul vl] // 16-byte Folded Spill +; CHECK64-NEXT: str z15, [sp, #8, mul vl] // 16-byte Folded Spill +; CHECK64-NEXT: str z14, [sp, #9, mul vl] // 16-byte Folded Spill +; CHECK64-NEXT: str z13, [sp, #10, mul vl] // 16-byte Folded Spill +; CHECK64-NEXT: str z12, [sp, #11, mul vl] // 16-byte Folded Spill +; CHECK64-NEXT: str z11, [sp, #12, mul vl] // 16-byte Folded Spill +; CHECK64-NEXT: str z10, [sp, #13, mul vl] // 16-byte Folded Spill +; CHECK64-NEXT: str z9, [sp, #14, mul vl] // 16-byte Folded Spill +; CHECK64-NEXT: str z8, [sp, #15, mul vl] // 16-byte Folded Spill +; CHECK64-NEXT: .cfi_escape 0x10, 0x48, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x68, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d8 @ cfa - 24 * IncomingVG - 128 +; CHECK64-NEXT: .cfi_escape 0x10, 0x49, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x60, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d9 @ cfa - 32 * IncomingVG - 128 +; CHECK64-NEXT: .cfi_escape 0x10, 0x4a, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x58, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d10 @ cfa - 40 * IncomingVG - 128 +; CHECK64-NEXT: .cfi_escape 0x10, 0x4b, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x50, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d11 @ cfa - 48 * IncomingVG - 128 +; CHECK64-NEXT: .cfi_escape 0x10, 0x4c, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x48, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d12 @ cfa - 56 * IncomingVG - 128 +; CHECK64-NEXT: .cfi_escape 0x10, 0x4d, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x40, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d13 @ cfa - 64 * IncomingVG - 128 +; CHECK64-NEXT: .cfi_escape 0x10, 0x4e, 0x0e, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0xb8, 0x7f, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d14 @ cfa - 72 * IncomingVG - 128 +; CHECK64-NEXT: .cfi_escape 0x10, 0x4f, 0x0e, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0xb0, 0x7f, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d15 @ cfa - 80 * IncomingVG - 128 ; CHECK64-NEXT: sub sp, sp, #112 ; CHECK64-NEXT: bl __arm_sme_state ; CHECK64-NEXT: mov x19, x0 @@ -2536,22 +2539,32 @@ define i32 @svecc_alloca_call(<4 x i16> %P0, ptr %P1, i32 %P2, <vscale x 16 x i8 ; CHECK64-NEXT: mov w0, #22647 // =0x5877 ; CHECK64-NEXT: movk w0, #59491, lsl #16 ; CHECK64-NEXT: add sp, sp, #112 -; CHECK64-NEXT: ldr z23, [sp, #2, mul vl] // 
16-byte Folded Reload -; CHECK64-NEXT: ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload -; CHECK64-NEXT: ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload -; CHECK64-NEXT: ldr z20, [sp, #5, mul vl] // 16-byte Folded Reload -; CHECK64-NEXT: ldr z19, [sp, #6, mul vl] // 16-byte Folded Reload -; CHECK64-NEXT: ldr z18, [sp, #7, mul vl] // 16-byte Folded Reload -; CHECK64-NEXT: ldr z17, [sp, #8, mul vl] // 16-byte Folded Reload -; CHECK64-NEXT: ldr z16, [sp, #9, mul vl] // 16-byte Folded Reload -; CHECK64-NEXT: ldr z15, [sp, #10, mul vl] // 16-byte Folded Reload -; CHECK64-NEXT: ldr z14, [sp, #11, mul vl] // 16-byte Folded Reload -; CHECK64-NEXT: ldr z13, [sp, #12, mul vl] // 16-byte Folded Reload -; CHECK64-NEXT: ldr z12, [sp, #13, mul vl] // 16-byte Folded Reload -; CHECK64-NEXT: ldr z11, [sp, #14, mul vl] // 16-byte Folded Reload -; CHECK64-NEXT: ldr z10, [sp, #15, mul vl] // 16-byte Folded Reload -; CHECK64-NEXT: ldr z9, [sp, #16, mul vl] // 16-byte Folded Reload -; CHECK64-NEXT: ldr z8, [sp, #17, mul vl] // 16-byte Folded Reload +; CHECK64-NEXT: ldr z23, [sp] // 16-byte Folded Reload +; CHECK64-NEXT: ldr z22, [sp, #1, mul vl] // 16-byte Folded Reload +; CHECK64-NEXT: ldr z21, [sp, #2, mul vl] // 16-byte Folded Reload +; CHECK64-NEXT: ldr z20, [sp, #3, mul vl] // 16-byte Folded Reload +; CHECK64-NEXT: ldr z19, [sp, #4, mul vl] // 16-byte Folded Reload +; CHECK64-NEXT: ldr z18, [sp, #5, mul vl] // 16-byte Folded Reload +; CHECK64-NEXT: ldr z17, [sp, #6, mul vl] // 16-byte Folded Reload +; CHECK64-NEXT: ldr z16, [sp, #7, mul vl] // 16-byte Folded Reload +; CHECK64-NEXT: ldr z15, [sp, #8, mul vl] // 16-byte Folded Reload +; CHECK64-NEXT: ldr z14, [sp, #9, mul vl] // 16-byte Folded Reload +; CHECK64-NEXT: ldr z13, [sp, #10, mul vl] // 16-byte Folded Reload +; CHECK64-NEXT: ldr z12, [sp, #11, mul vl] // 16-byte Folded Reload +; CHECK64-NEXT: ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload +; CHECK64-NEXT: ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload +; CHECK64-NEXT: ldr z9, [sp, #14, mul vl] // 16-byte Folded Reload +; CHECK64-NEXT: ldr z8, [sp, #15, mul vl] // 16-byte Folded Reload +; CHECK64-NEXT: add sp, sp, #64 +; CHECK64-NEXT: addvl sp, sp, #16 +; CHECK64-NEXT: .cfi_restore z8 +; CHECK64-NEXT: .cfi_restore z9 +; CHECK64-NEXT: .cfi_restore z10 +; CHECK64-NEXT: .cfi_restore z11 +; CHECK64-NEXT: .cfi_restore z12 +; CHECK64-NEXT: .cfi_restore z13 +; CHECK64-NEXT: .cfi_restore z14 +; CHECK64-NEXT: .cfi_restore z15 ; CHECK64-NEXT: ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload ; CHECK64-NEXT: ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload ; CHECK64-NEXT: ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload @@ -2564,20 +2577,11 @@ define i32 @svecc_alloca_call(<4 x i16> %P0, ptr %P1, i32 %P2, <vscale x 16 x i8 ; CHECK64-NEXT: ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload ; CHECK64-NEXT: ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload ; CHECK64-NEXT: ldr p4, [sp, #15, mul vl] // 2-byte Folded Reload -; CHECK64-NEXT: addvl sp, sp, #18 -; CHECK64-NEXT: .cfi_restore z8 -; CHECK64-NEXT: .cfi_restore z9 -; CHECK64-NEXT: .cfi_restore z10 -; CHECK64-NEXT: .cfi_restore z11 -; CHECK64-NEXT: .cfi_restore z12 -; CHECK64-NEXT: .cfi_restore z13 -; CHECK64-NEXT: .cfi_restore z14 -; CHECK64-NEXT: .cfi_restore z15 -; CHECK64-NEXT: .cfi_def_cfa wsp, 128 -; CHECK64-NEXT: ldp x26, x19, [sp, #104] // 16-byte Folded Reload -; CHECK64-NEXT: ldp x28, x27, [sp, #88] // 16-byte Folded Reload -; CHECK64-NEXT: ldp x29, x30, [sp, #64] // 16-byte Folded Reload -; CHECK64-NEXT: add sp, sp, #128 +; 
CHECK64-NEXT: addvl sp, sp, #2 +; CHECK64-NEXT: .cfi_def_cfa wsp, 64 +; CHECK64-NEXT: ldp x26, x19, [sp, #48] // 16-byte Folded Reload +; CHECK64-NEXT: ldp x28, x27, [sp, #32] // 16-byte Folded Reload +; CHECK64-NEXT: ldp x29, x30, [sp], #64 // 16-byte Folded Reload ; CHECK64-NEXT: .cfi_def_cfa_offset 0 ; CHECK64-NEXT: .cfi_restore w19 ; CHECK64-NEXT: .cfi_restore w26 diff --git a/llvm/test/CodeGen/AArch64/sve-int-mul-neg.ll b/llvm/test/CodeGen/AArch64/sve-int-mul-neg.ll new file mode 100644 index 0000000..a1065bc --- /dev/null +++ b/llvm/test/CodeGen/AArch64/sve-int-mul-neg.ll @@ -0,0 +1,131 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc -verify-machineinstrs -mattr=+sve < %s | FileCheck %s + +target triple = "aarch64-unknown-linux-gnu" + +; Muls with (-1) as operand should fold to neg. +define <vscale x 16 x i8> @mul_neg_fold_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) { +; CHECK-LABEL: mul_neg_fold_i8: +; CHECK: // %bb.0: +; CHECK-NEXT: neg z0.b, p0/m, z0.b +; CHECK-NEXT: ret + %1 = call <vscale x 16 x i8> @llvm.aarch64.sve.mul.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> splat(i8 -1)) + ret <vscale x 16 x i8> %1 +} + +define <vscale x 8 x i16> @mul_neg_fold_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) { +; CHECK-LABEL: mul_neg_fold_i16: +; CHECK: // %bb.0: +; CHECK-NEXT: neg z0.h, p0/m, z0.h +; CHECK-NEXT: ret + %1 = call <vscale x 8 x i16> @llvm.aarch64.sve.mul.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> splat(i16 -1)) + ret <vscale x 8 x i16> %1 +} + +define <vscale x 4 x i32> @mul_neg_fold_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) { +; CHECK-LABEL: mul_neg_fold_i32: +; CHECK: // %bb.0: +; CHECK-NEXT: neg z0.s, p0/m, z0.s +; CHECK-NEXT: ret + %1 = call <vscale x 4 x i32> @llvm.aarch64.sve.mul.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> splat(i32 -1)) + ret <vscale x 4 x i32> %1 +} + +define <vscale x 2 x i64> @mul_neg_fold_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) { +; CHECK-LABEL: mul_neg_fold_i64: +; CHECK: // %bb.0: +; CHECK-NEXT: neg z0.d, p0/m, z0.d +; CHECK-NEXT: ret + %1 = call <vscale x 2 x i64> @llvm.aarch64.sve.mul.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> splat(i64 -1)) + ret <vscale x 2 x i64> %1 +} + +define <vscale x 16 x i8> @mul_neg_fold_u_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) { +; CHECK-LABEL: mul_neg_fold_u_i8: +; CHECK: // %bb.0: +; CHECK-NEXT: neg z0.b, p0/m, z0.b +; CHECK-NEXT: ret + %1 = call <vscale x 16 x i8> @llvm.aarch64.sve.mul.u.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> splat(i8 -1)) + ret <vscale x 16 x i8> %1 +} + +define <vscale x 8 x i16> @mul_neg_fold_u_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) { +; CHECK-LABEL: mul_neg_fold_u_i16: +; CHECK: // %bb.0: +; CHECK-NEXT: neg z0.h, p0/m, z0.h +; CHECK-NEXT: ret + %1 = call <vscale x 8 x i16> @llvm.aarch64.sve.mul.u.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> splat(i16 -1)) + ret <vscale x 8 x i16> %1 +} + +define <vscale x 4 x i32> @mul_neg_fold_u_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) { +; CHECK-LABEL: mul_neg_fold_u_i32: +; CHECK: // %bb.0: +; CHECK-NEXT: neg z0.s, p0/m, z0.s +; CHECK-NEXT: ret + %1 = call <vscale x 4 x i32> @llvm.aarch64.sve.mul.u.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> splat(i32 -1)) + ret <vscale x 4 x i32> %1 +} + +define <vscale x 2 x i64> 
@mul_neg_fold_u_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) { +; CHECK-LABEL: mul_neg_fold_u_i64: +; CHECK: // %bb.0: +; CHECK-NEXT: neg z0.d, p0/m, z0.d +; CHECK-NEXT: ret + %1 = call <vscale x 2 x i64> @llvm.aarch64.sve.mul.u.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> splat(i64 -1)) + ret <vscale x 2 x i64> %1 +} + +define <vscale x 16 x i8> @mul_neg_fold_different_argument_order_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) { +; CHECK-LABEL: mul_neg_fold_different_argument_order_i8: +; CHECK: // %bb.0: +; CHECK-NEXT: mov z1.b, #-1 // =0xffffffffffffffff +; CHECK-NEXT: neg z1.b, p0/m, z0.b +; CHECK-NEXT: mov z0.d, z1.d +; CHECK-NEXT: ret + %1 = call <vscale x 16 x i8> @llvm.aarch64.sve.mul.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> splat(i8 -1), <vscale x 16 x i8> %a) + ret <vscale x 16 x i8> %1 +} + +define <vscale x 8 x i16> @mul_neg_fold_different_argument_order_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) { +; CHECK-LABEL: mul_neg_fold_different_argument_order_i16: +; CHECK: // %bb.0: +; CHECK-NEXT: mov z1.h, #-1 // =0xffffffffffffffff +; CHECK-NEXT: neg z1.h, p0/m, z0.h +; CHECK-NEXT: mov z0.d, z1.d +; CHECK-NEXT: ret + %1 = call <vscale x 8 x i16> @llvm.aarch64.sve.mul.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> splat(i16 -1), <vscale x 8 x i16> %a) + ret <vscale x 8 x i16> %1 +} + +define <vscale x 4 x i32> @mul_neg_fold_different_argument_order_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) { +; CHECK-LABEL: mul_neg_fold_different_argument_order_i32: +; CHECK: // %bb.0: +; CHECK-NEXT: mov z1.s, #-1 // =0xffffffffffffffff +; CHECK-NEXT: neg z1.s, p0/m, z0.s +; CHECK-NEXT: mov z0.d, z1.d +; CHECK-NEXT: ret + %1 = call <vscale x 4 x i32> @llvm.aarch64.sve.mul.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> splat(i32 -1), <vscale x 4 x i32> %a) + ret <vscale x 4 x i32> %1 +} + +define <vscale x 2 x i64> @mul_neg_fold_different_argument_order_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) { +; CHECK-LABEL: mul_neg_fold_different_argument_order_i64: +; CHECK: // %bb.0: +; CHECK-NEXT: mov z1.d, #-1 // =0xffffffffffffffff +; CHECK-NEXT: neg z1.d, p0/m, z0.d +; CHECK-NEXT: mov z0.d, z1.d +; CHECK-NEXT: ret + %1 = call <vscale x 2 x i64> @llvm.aarch64.sve.mul.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> splat(i64 -1), <vscale x 2 x i64> %a) + ret <vscale x 2 x i64> %1 +} + +declare <vscale x 16 x i8> @llvm.aarch64.sve.mul.u.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>) +declare <vscale x 8 x i16> @llvm.aarch64.sve.mul.u.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>) +declare <vscale x 4 x i32> @llvm.aarch64.sve.mul.u.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>) +declare <vscale x 2 x i64> @llvm.aarch64.sve.mul.u.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>) + +declare <vscale x 16 x i8> @llvm.aarch64.sve.mul.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>) +declare <vscale x 8 x i16> @llvm.aarch64.sve.mul.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>) +declare <vscale x 4 x i32> @llvm.aarch64.sve.mul.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>) +declare <vscale x 2 x i64> @llvm.aarch64.sve.mul.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>) diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/load-constant.96.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/load-constant.96.ll index 41fda6d..efa51ea 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/load-constant.96.ll 
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/load-constant.96.ll @@ -90,26 +90,24 @@ define <3 x i32> @v_load_constant_v3i32_align1(ptr addrspace(4) %ptr) { ; GFX1250-NOUNALIGNED-NEXT: global_load_u8 v10, v[0:1], off offset:8 ; GFX1250-NOUNALIGNED-NEXT: global_load_u8 v11, v[0:1], off offset:9 ; GFX1250-NOUNALIGNED-NEXT: global_load_u8 v12, v[0:1], off offset:11 -; GFX1250-NOUNALIGNED-NEXT: global_load_u8 v0, v[0:1], off offset:10 +; GFX1250-NOUNALIGNED-NEXT: global_load_u8 v13, v[0:1], off offset:10 ; GFX1250-NOUNALIGNED-NEXT: s_wait_loadcnt 0xa ; GFX1250-NOUNALIGNED-NEXT: s_wait_xcnt 0x0 -; GFX1250-NOUNALIGNED-NEXT: v_lshl_or_b32 v1, v3, 8, v2 +; GFX1250-NOUNALIGNED-NEXT: v_lshl_or_b32 v0, v3, 8, v2 ; GFX1250-NOUNALIGNED-NEXT: s_wait_loadcnt 0x8 -; GFX1250-NOUNALIGNED-NEXT: v_dual_lshlrev_b32 v3, 16, v4 :: v_dual_lshlrev_b32 v2, 24, v5 +; GFX1250-NOUNALIGNED-NEXT: v_dual_lshlrev_b32 v2, 16, v4 :: v_dual_lshlrev_b32 v1, 24, v5 ; GFX1250-NOUNALIGNED-NEXT: s_wait_loadcnt 0x6 -; GFX1250-NOUNALIGNED-NEXT: v_lshl_or_b32 v4, v7, 8, v6 +; GFX1250-NOUNALIGNED-NEXT: v_lshl_or_b32 v3, v7, 8, v6 ; GFX1250-NOUNALIGNED-NEXT: s_wait_loadcnt 0x4 -; GFX1250-NOUNALIGNED-NEXT: v_dual_lshlrev_b32 v6, 16, v8 :: v_dual_lshlrev_b32 v5, 24, v9 +; GFX1250-NOUNALIGNED-NEXT: v_dual_lshlrev_b32 v5, 16, v8 :: v_dual_lshlrev_b32 v4, 24, v9 +; GFX1250-NOUNALIGNED-NEXT: v_or3_b32 v0, v1, v2, v0 ; GFX1250-NOUNALIGNED-NEXT: s_wait_loadcnt 0x2 -; GFX1250-NOUNALIGNED-NEXT: v_lshl_or_b32 v7, v11, 8, v10 -; GFX1250-NOUNALIGNED-NEXT: s_wait_loadcnt 0x1 -; GFX1250-NOUNALIGNED-NEXT: v_lshlrev_b32_e32 v8, 24, v12 +; GFX1250-NOUNALIGNED-NEXT: v_lshl_or_b32 v6, v11, 8, v10 ; GFX1250-NOUNALIGNED-NEXT: s_wait_loadcnt 0x0 -; GFX1250-NOUNALIGNED-NEXT: v_lshlrev_b32_e32 v9, 16, v0 -; GFX1250-NOUNALIGNED-NEXT: v_or3_b32 v0, v2, v3, v1 -; GFX1250-NOUNALIGNED-NEXT: v_or3_b32 v1, v5, v6, v4 -; GFX1250-NOUNALIGNED-NEXT: s_delay_alu instid0(VALU_DEP_3) -; GFX1250-NOUNALIGNED-NEXT: v_or3_b32 v2, v8, v9, v7 +; GFX1250-NOUNALIGNED-NEXT: v_dual_lshlrev_b32 v7, 24, v12 :: v_dual_lshlrev_b32 v8, 16, v13 +; GFX1250-NOUNALIGNED-NEXT: v_or3_b32 v1, v4, v5, v3 +; GFX1250-NOUNALIGNED-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX1250-NOUNALIGNED-NEXT: v_or3_b32 v2, v7, v8, v6 ; GFX1250-NOUNALIGNED-NEXT: s_set_pc_i64 s[30:31] ; ; GFX9-UNALIGNED-LABEL: v_load_constant_v3i32_align1: @@ -942,7 +940,7 @@ define amdgpu_ps <3 x i32> @s_load_constant_v3i32_align1(ptr addrspace(4) inreg ; ; GFX1250-NOUNALIGNED-LABEL: s_load_constant_v3i32_align1: ; GFX1250-NOUNALIGNED: ; %bb.0: -; GFX1250-NOUNALIGNED-NEXT: s_clause 0xa +; GFX1250-NOUNALIGNED-NEXT: s_clause 0xb ; GFX1250-NOUNALIGNED-NEXT: s_load_u8 s2, s[0:1], 0x1 ; GFX1250-NOUNALIGNED-NEXT: s_load_u8 s3, s[0:1], 0x3 ; GFX1250-NOUNALIGNED-NEXT: s_load_u8 s4, s[0:1], 0x2 @@ -954,27 +952,26 @@ define amdgpu_ps <3 x i32> @s_load_constant_v3i32_align1(ptr addrspace(4) inreg ; GFX1250-NOUNALIGNED-NEXT: s_load_u8 s10, s[0:1], 0x0 ; GFX1250-NOUNALIGNED-NEXT: s_load_u8 s11, s[0:1], 0x4 ; GFX1250-NOUNALIGNED-NEXT: s_load_u8 s12, s[0:1], 0xa -; GFX1250-NOUNALIGNED-NEXT: s_wait_xcnt 0x0 -; GFX1250-NOUNALIGNED-NEXT: s_load_u8 s1, s[0:1], 0x8 +; GFX1250-NOUNALIGNED-NEXT: s_load_u8 s13, s[0:1], 0x8 ; GFX1250-NOUNALIGNED-NEXT: s_wait_kmcnt 0x0 ; GFX1250-NOUNALIGNED-NEXT: s_lshl_b32 s0, s2, 8 -; GFX1250-NOUNALIGNED-NEXT: s_lshl_b32 s2, s3, 24 -; GFX1250-NOUNALIGNED-NEXT: s_lshl_b32 s3, s4, 16 -; GFX1250-NOUNALIGNED-NEXT: s_lshl_b32 s4, s5, 8 -; GFX1250-NOUNALIGNED-NEXT: s_or_b32 s2, s2, s3 -; GFX1250-NOUNALIGNED-NEXT: s_lshl_b32 s5, 
s6, 24 -; GFX1250-NOUNALIGNED-NEXT: s_lshl_b32 s6, s7, 16 -; GFX1250-NOUNALIGNED-NEXT: s_lshl_b32 s7, s8, 8 +; GFX1250-NOUNALIGNED-NEXT: s_lshl_b32 s1, s3, 24 +; GFX1250-NOUNALIGNED-NEXT: s_lshl_b32 s2, s4, 16 +; GFX1250-NOUNALIGNED-NEXT: s_lshl_b32 s3, s5, 8 +; GFX1250-NOUNALIGNED-NEXT: s_or_b32 s1, s1, s2 +; GFX1250-NOUNALIGNED-NEXT: s_lshl_b32 s4, s6, 24 +; GFX1250-NOUNALIGNED-NEXT: s_lshl_b32 s5, s7, 16 +; GFX1250-NOUNALIGNED-NEXT: s_lshl_b32 s6, s8, 8 ; GFX1250-NOUNALIGNED-NEXT: s_or_b32 s0, s0, s10 -; GFX1250-NOUNALIGNED-NEXT: s_lshl_b32 s8, s9, 24 -; GFX1250-NOUNALIGNED-NEXT: s_or_b32 s0, s2, s0 -; GFX1250-NOUNALIGNED-NEXT: s_lshl_b32 s2, s12, 16 -; GFX1250-NOUNALIGNED-NEXT: s_or_b32 s3, s4, s11 -; GFX1250-NOUNALIGNED-NEXT: s_or_b32 s4, s5, s6 +; GFX1250-NOUNALIGNED-NEXT: s_lshl_b32 s7, s9, 24 +; GFX1250-NOUNALIGNED-NEXT: s_or_b32 s0, s1, s0 +; GFX1250-NOUNALIGNED-NEXT: s_lshl_b32 s1, s12, 16 +; GFX1250-NOUNALIGNED-NEXT: s_or_b32 s2, s3, s11 +; GFX1250-NOUNALIGNED-NEXT: s_or_b32 s3, s4, s5 +; GFX1250-NOUNALIGNED-NEXT: s_or_b32 s4, s6, s13 ; GFX1250-NOUNALIGNED-NEXT: s_or_b32 s5, s7, s1 -; GFX1250-NOUNALIGNED-NEXT: s_or_b32 s2, s8, s2 -; GFX1250-NOUNALIGNED-NEXT: s_or_b32 s1, s4, s3 -; GFX1250-NOUNALIGNED-NEXT: s_or_b32 s2, s2, s5 +; GFX1250-NOUNALIGNED-NEXT: s_or_b32 s1, s3, s2 +; GFX1250-NOUNALIGNED-NEXT: s_or_b32 s2, s5, s4 ; GFX1250-NOUNALIGNED-NEXT: ; return to shader part epilog ; ; GFX9-UNALIGNED-LABEL: s_load_constant_v3i32_align1: @@ -1351,11 +1348,25 @@ define amdgpu_ps <3 x i32> @s_load_constant_v3i32_align2(ptr addrspace(4) inreg } define amdgpu_ps <3 x i32> @s_load_constant_v3i32_align4(ptr addrspace(4) inreg %ptr) { -; GFX12-LABEL: s_load_constant_v3i32_align4: -; GFX12: ; %bb.0: -; GFX12-NEXT: s_load_b96 s[0:2], s[0:1], 0x0 -; GFX12-NEXT: s_wait_kmcnt 0x0 -; GFX12-NEXT: ; return to shader part epilog +; GFX12-UNALIGNED-LABEL: s_load_constant_v3i32_align4: +; GFX12-UNALIGNED: ; %bb.0: +; GFX12-UNALIGNED-NEXT: s_load_b96 s[0:2], s[0:1], 0x0 +; GFX12-UNALIGNED-NEXT: s_wait_kmcnt 0x0 +; GFX12-UNALIGNED-NEXT: ; return to shader part epilog +; +; GFX12-NOUNALIGNED-LABEL: s_load_constant_v3i32_align4: +; GFX12-NOUNALIGNED: ; %bb.0: +; GFX12-NOUNALIGNED-NEXT: s_load_b96 s[0:2], s[0:1], 0x0 +; GFX12-NOUNALIGNED-NEXT: s_wait_kmcnt 0x0 +; GFX12-NOUNALIGNED-NEXT: ; return to shader part epilog +; +; GFX1250-LABEL: s_load_constant_v3i32_align4: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_mov_b32 s4, s0 +; GFX1250-NEXT: s_mov_b32 s5, s1 +; GFX1250-NEXT: s_load_b96 s[0:2], s[4:5], 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: ; return to shader part epilog ; ; GFX9-LABEL: s_load_constant_v3i32_align4: ; GFX9: ; %bb.0: @@ -1388,11 +1399,25 @@ define amdgpu_ps <3 x i32> @s_load_constant_v3i32_align4(ptr addrspace(4) inreg } define amdgpu_ps i96 @s_load_constant_i96_align8(ptr addrspace(4) inreg %ptr) { -; GFX12-LABEL: s_load_constant_i96_align8: -; GFX12: ; %bb.0: -; GFX12-NEXT: s_load_b96 s[0:2], s[0:1], 0x0 -; GFX12-NEXT: s_wait_kmcnt 0x0 -; GFX12-NEXT: ; return to shader part epilog +; GFX12-UNALIGNED-LABEL: s_load_constant_i96_align8: +; GFX12-UNALIGNED: ; %bb.0: +; GFX12-UNALIGNED-NEXT: s_load_b96 s[0:2], s[0:1], 0x0 +; GFX12-UNALIGNED-NEXT: s_wait_kmcnt 0x0 +; GFX12-UNALIGNED-NEXT: ; return to shader part epilog +; +; GFX12-NOUNALIGNED-LABEL: s_load_constant_i96_align8: +; GFX12-NOUNALIGNED: ; %bb.0: +; GFX12-NOUNALIGNED-NEXT: s_load_b96 s[0:2], s[0:1], 0x0 +; GFX12-NOUNALIGNED-NEXT: s_wait_kmcnt 0x0 +; GFX12-NOUNALIGNED-NEXT: ; return to shader part epilog +; +; 
GFX1250-LABEL: s_load_constant_i96_align8: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_mov_b32 s4, s0 +; GFX1250-NEXT: s_mov_b32 s5, s1 +; GFX1250-NEXT: s_load_b96 s[0:2], s[4:5], 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: ; return to shader part epilog ; ; GFX9-LABEL: s_load_constant_i96_align8: ; GFX9: ; %bb.0: @@ -1425,11 +1450,25 @@ define amdgpu_ps i96 @s_load_constant_i96_align8(ptr addrspace(4) inreg %ptr) { } define amdgpu_ps <3 x i32> @s_load_constant_v3i32_align8(ptr addrspace(4) inreg %ptr) { -; GFX12-LABEL: s_load_constant_v3i32_align8: -; GFX12: ; %bb.0: -; GFX12-NEXT: s_load_b96 s[0:2], s[0:1], 0x0 -; GFX12-NEXT: s_wait_kmcnt 0x0 -; GFX12-NEXT: ; return to shader part epilog +; GFX12-UNALIGNED-LABEL: s_load_constant_v3i32_align8: +; GFX12-UNALIGNED: ; %bb.0: +; GFX12-UNALIGNED-NEXT: s_load_b96 s[0:2], s[0:1], 0x0 +; GFX12-UNALIGNED-NEXT: s_wait_kmcnt 0x0 +; GFX12-UNALIGNED-NEXT: ; return to shader part epilog +; +; GFX12-NOUNALIGNED-LABEL: s_load_constant_v3i32_align8: +; GFX12-NOUNALIGNED: ; %bb.0: +; GFX12-NOUNALIGNED-NEXT: s_load_b96 s[0:2], s[0:1], 0x0 +; GFX12-NOUNALIGNED-NEXT: s_wait_kmcnt 0x0 +; GFX12-NOUNALIGNED-NEXT: ; return to shader part epilog +; +; GFX1250-LABEL: s_load_constant_v3i32_align8: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_mov_b32 s4, s0 +; GFX1250-NEXT: s_mov_b32 s5, s1 +; GFX1250-NEXT: s_load_b96 s[0:2], s[4:5], 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: ; return to shader part epilog ; ; GFX9-LABEL: s_load_constant_v3i32_align8: ; GFX9: ; %bb.0: @@ -1462,11 +1501,25 @@ define amdgpu_ps <3 x i32> @s_load_constant_v3i32_align8(ptr addrspace(4) inreg } define amdgpu_ps <3 x i32> @s_load_constant_v6i16_align8(ptr addrspace(4) inreg %ptr) { -; GFX12-LABEL: s_load_constant_v6i16_align8: -; GFX12: ; %bb.0: -; GFX12-NEXT: s_load_b96 s[0:2], s[0:1], 0x0 -; GFX12-NEXT: s_wait_kmcnt 0x0 -; GFX12-NEXT: ; return to shader part epilog +; GFX12-UNALIGNED-LABEL: s_load_constant_v6i16_align8: +; GFX12-UNALIGNED: ; %bb.0: +; GFX12-UNALIGNED-NEXT: s_load_b96 s[0:2], s[0:1], 0x0 +; GFX12-UNALIGNED-NEXT: s_wait_kmcnt 0x0 +; GFX12-UNALIGNED-NEXT: ; return to shader part epilog +; +; GFX12-NOUNALIGNED-LABEL: s_load_constant_v6i16_align8: +; GFX12-NOUNALIGNED: ; %bb.0: +; GFX12-NOUNALIGNED-NEXT: s_load_b96 s[0:2], s[0:1], 0x0 +; GFX12-NOUNALIGNED-NEXT: s_wait_kmcnt 0x0 +; GFX12-NOUNALIGNED-NEXT: ; return to shader part epilog +; +; GFX1250-LABEL: s_load_constant_v6i16_align8: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_mov_b32 s4, s0 +; GFX1250-NEXT: s_mov_b32 s5, s1 +; GFX1250-NEXT: s_load_b96 s[0:2], s[4:5], 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: ; return to shader part epilog ; ; GFX9-LABEL: s_load_constant_v6i16_align8: ; GFX9: ; %bb.0: @@ -1500,24 +1553,64 @@ define amdgpu_ps <3 x i32> @s_load_constant_v6i16_align8(ptr addrspace(4) inreg } define amdgpu_ps <12 x i8> @s_load_constant_v12i8_align8(ptr addrspace(4) inreg %ptr) { -; GFX12-LABEL: s_load_constant_v12i8_align8: -; GFX12: ; %bb.0: -; GFX12-NEXT: s_load_b96 s[0:2], s[0:1], 0x0 -; GFX12-NEXT: s_wait_kmcnt 0x0 -; GFX12-NEXT: s_lshr_b32 s13, s0, 8 -; GFX12-NEXT: s_lshr_b32 s12, s0, 16 -; GFX12-NEXT: s_lshr_b32 s3, s0, 24 -; GFX12-NEXT: s_lshr_b32 s5, s1, 8 -; GFX12-NEXT: s_lshr_b32 s6, s1, 16 -; GFX12-NEXT: s_lshr_b32 s7, s1, 24 -; GFX12-NEXT: s_lshr_b32 s9, s2, 8 -; GFX12-NEXT: s_lshr_b32 s10, s2, 16 -; GFX12-NEXT: s_lshr_b32 s11, s2, 24 -; GFX12-NEXT: s_mov_b32 s4, s1 -; GFX12-NEXT: s_mov_b32 s8, s2 -; GFX12-NEXT: s_mov_b32 s1, s13 -; GFX12-NEXT: s_mov_b32 s2, s12 -; GFX12-NEXT: ; 
return to shader part epilog +; GFX12-UNALIGNED-LABEL: s_load_constant_v12i8_align8: +; GFX12-UNALIGNED: ; %bb.0: +; GFX12-UNALIGNED-NEXT: s_load_b96 s[0:2], s[0:1], 0x0 +; GFX12-UNALIGNED-NEXT: s_wait_kmcnt 0x0 +; GFX12-UNALIGNED-NEXT: s_lshr_b32 s13, s0, 8 +; GFX12-UNALIGNED-NEXT: s_lshr_b32 s12, s0, 16 +; GFX12-UNALIGNED-NEXT: s_lshr_b32 s3, s0, 24 +; GFX12-UNALIGNED-NEXT: s_lshr_b32 s5, s1, 8 +; GFX12-UNALIGNED-NEXT: s_lshr_b32 s6, s1, 16 +; GFX12-UNALIGNED-NEXT: s_lshr_b32 s7, s1, 24 +; GFX12-UNALIGNED-NEXT: s_lshr_b32 s9, s2, 8 +; GFX12-UNALIGNED-NEXT: s_lshr_b32 s10, s2, 16 +; GFX12-UNALIGNED-NEXT: s_lshr_b32 s11, s2, 24 +; GFX12-UNALIGNED-NEXT: s_mov_b32 s4, s1 +; GFX12-UNALIGNED-NEXT: s_mov_b32 s8, s2 +; GFX12-UNALIGNED-NEXT: s_mov_b32 s1, s13 +; GFX12-UNALIGNED-NEXT: s_mov_b32 s2, s12 +; GFX12-UNALIGNED-NEXT: ; return to shader part epilog +; +; GFX12-NOUNALIGNED-LABEL: s_load_constant_v12i8_align8: +; GFX12-NOUNALIGNED: ; %bb.0: +; GFX12-NOUNALIGNED-NEXT: s_load_b96 s[0:2], s[0:1], 0x0 +; GFX12-NOUNALIGNED-NEXT: s_wait_kmcnt 0x0 +; GFX12-NOUNALIGNED-NEXT: s_lshr_b32 s13, s0, 8 +; GFX12-NOUNALIGNED-NEXT: s_lshr_b32 s12, s0, 16 +; GFX12-NOUNALIGNED-NEXT: s_lshr_b32 s3, s0, 24 +; GFX12-NOUNALIGNED-NEXT: s_lshr_b32 s5, s1, 8 +; GFX12-NOUNALIGNED-NEXT: s_lshr_b32 s6, s1, 16 +; GFX12-NOUNALIGNED-NEXT: s_lshr_b32 s7, s1, 24 +; GFX12-NOUNALIGNED-NEXT: s_lshr_b32 s9, s2, 8 +; GFX12-NOUNALIGNED-NEXT: s_lshr_b32 s10, s2, 16 +; GFX12-NOUNALIGNED-NEXT: s_lshr_b32 s11, s2, 24 +; GFX12-NOUNALIGNED-NEXT: s_mov_b32 s4, s1 +; GFX12-NOUNALIGNED-NEXT: s_mov_b32 s8, s2 +; GFX12-NOUNALIGNED-NEXT: s_mov_b32 s1, s13 +; GFX12-NOUNALIGNED-NEXT: s_mov_b32 s2, s12 +; GFX12-NOUNALIGNED-NEXT: ; return to shader part epilog +; +; GFX1250-LABEL: s_load_constant_v12i8_align8: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_mov_b32 s4, s0 +; GFX1250-NEXT: s_mov_b32 s5, s1 +; GFX1250-NEXT: s_load_b96 s[0:2], s[4:5], 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: s_lshr_b32 s13, s0, 8 +; GFX1250-NEXT: s_lshr_b32 s12, s0, 16 +; GFX1250-NEXT: s_lshr_b32 s3, s0, 24 +; GFX1250-NEXT: s_lshr_b32 s5, s1, 8 +; GFX1250-NEXT: s_lshr_b32 s6, s1, 16 +; GFX1250-NEXT: s_lshr_b32 s7, s1, 24 +; GFX1250-NEXT: s_lshr_b32 s9, s2, 8 +; GFX1250-NEXT: s_lshr_b32 s10, s2, 16 +; GFX1250-NEXT: s_lshr_b32 s11, s2, 24 +; GFX1250-NEXT: s_mov_b32 s4, s1 +; GFX1250-NEXT: s_mov_b32 s8, s2 +; GFX1250-NEXT: s_mov_b32 s1, s13 +; GFX1250-NEXT: s_mov_b32 s2, s12 +; GFX1250-NEXT: ; return to shader part epilog ; ; GFX9-LABEL: s_load_constant_v12i8_align8: ; GFX9: ; %bb.0: diff --git a/llvm/test/CodeGen/AMDGPU/addrspacecast-gas.ll b/llvm/test/CodeGen/AMDGPU/addrspacecast-gas.ll index aac499f..b486fabb 100644 --- a/llvm/test/CodeGen/AMDGPU/addrspacecast-gas.ll +++ b/llvm/test/CodeGen/AMDGPU/addrspacecast-gas.ll @@ -9,15 +9,14 @@ target triple = "amdgcn-amd-amdhsa" define amdgpu_kernel void @use_private_to_flat_addrspacecast(ptr addrspace(5) %ptr) { ; GFX1250-SDAG-LABEL: use_private_to_flat_addrspacecast: ; GFX1250-SDAG: ; %bb.0: -; GFX1250-SDAG-NEXT: s_load_b32 s2, s[4:5], 0x24 +; GFX1250-SDAG-NEXT: s_load_b32 s0, s[4:5], 0x24 ; GFX1250-SDAG-NEXT: v_mbcnt_lo_u32_b32 v0, -1, 0 -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1) -; GFX1250-SDAG-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_lshlrev_b32 v1, 20, v0 -; GFX1250-SDAG-NEXT: s_cmp_lg_u32 s2, -1 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_lshlrev_b32 
v1, 20, v0 +; GFX1250-SDAG-NEXT: s_cmp_lg_u32 s0, -1 ; GFX1250-SDAG-NEXT: s_cselect_b32 vcc_lo, -1, 0 -; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[0:1], v[0:1] +; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], src_flat_scratch_base_lo, v[0:1] ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_cndmask_b32 v1, 0, v1 ; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v0, 0, v0, vcc_lo @@ -56,13 +55,11 @@ define amdgpu_kernel void @use_private_to_flat_addrspacecast_nonnull(ptr addrspa ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: s_load_b32 s0, s[4:5], 0x24 ; GFX1250-SDAG-NEXT: v_mbcnt_lo_u32_b32 v0, -1, 0 -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_lshlrev_b32 v1, 20, v0 ; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0 ; GFX1250-SDAG-NEXT: v_mov_b32_e32 v0, s0 -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_flat_scratch_base_lo -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) -; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[0:1], v[0:1] +; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], src_flat_scratch_base_lo, v[0:1] ; GFX1250-SDAG-NEXT: flat_store_b32 v[0:1], v2 scope:SCOPE_SYS ; GFX1250-SDAG-NEXT: s_wait_storecnt 0x0 ; GFX1250-SDAG-NEXT: s_endpgm @@ -91,10 +88,9 @@ define amdgpu_kernel void @use_flat_to_private_addrspacecast(ptr %ptr) { ; GFX1250-LABEL: use_flat_to_private_addrspacecast: ; GFX1250: ; %bb.0: ; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 -; GFX1250-NEXT: s_mov_b32 s2, src_flat_scratch_base_lo ; GFX1250-NEXT: v_mov_b32_e32 v0, 0 ; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: s_sub_co_i32 s2, s0, s2 +; GFX1250-NEXT: s_sub_co_i32 s2, s0, src_flat_scratch_base_lo ; GFX1250-NEXT: s_cmp_lg_u64 s[0:1], 0 ; GFX1250-NEXT: s_cselect_b32 s0, s2, -1 ; GFX1250-NEXT: scratch_store_b32 off, v0, s0 scope:SCOPE_SYS @@ -110,9 +106,8 @@ define amdgpu_kernel void @use_flat_to_private_addrspacecast_nonnull(ptr %ptr) { ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: s_load_b32 s0, s[4:5], 0x24 ; GFX1250-SDAG-NEXT: v_mov_b32_e32 v0, 0 -; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0 -; GFX1250-SDAG-NEXT: s_sub_co_i32 s0, s0, s1 +; GFX1250-SDAG-NEXT: s_sub_co_i32 s0, s0, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: scratch_store_b32 off, v0, s0 scope:SCOPE_SYS ; GFX1250-SDAG-NEXT: s_wait_storecnt 0x0 ; GFX1250-SDAG-NEXT: s_endpgm @@ -122,9 +117,7 @@ define amdgpu_kernel void @use_flat_to_private_addrspacecast_nonnull(ptr %ptr) { ; GFX1250-GISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 ; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, 0 ; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0 -; GFX1250-GISEL-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo -; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; GFX1250-GISEL-NEXT: s_sub_co_i32 s0, s0, s1 +; GFX1250-GISEL-NEXT: s_sub_co_i32 s0, s0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: scratch_store_b32 off, v0, s0 scope:SCOPE_SYS ; GFX1250-GISEL-NEXT: s_wait_storecnt 0x0 ; GFX1250-GISEL-NEXT: s_endpgm diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-attributor-flat-scratch-init-asan.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-attributor-flat-scratch-init-asan.ll new file mode 100644 index 0000000..0d68762 --- /dev/null +++ b/llvm/test/CodeGen/AMDGPU/amdgpu-attributor-flat-scratch-init-asan.ll @@ -0,0 +1,24 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py 
UTC_ARGS: --check-attributes --check-globals all --version 6 +; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx90a -passes='amdgpu-attributor' %s -o - | FileCheck %s + +@lds_1 = internal addrspace(3) global [1 x i8] poison, align 4 + +;. +; CHECK: @lds_1 = internal addrspace(3) global [1 x i8] poison, align 4 +;. +define amdgpu_kernel void @k0() #0 { +; CHECK: Function Attrs: sanitize_address +; CHECK-LABEL: define amdgpu_kernel void @k0( +; CHECK-SAME: ) #[[ATTR0:[0-9]+]] { +; CHECK-NEXT: store i8 7, ptr addrspace(3) @lds_1, align 4 +; CHECK-NEXT: ret void +; + store i8 7, ptr addrspace(3) @lds_1, align 4 + ret void +} + +attributes #0 = { sanitize_address } +; "amdgpu-no-flat-scratch-init" attribute should not be present in attribute list +;. +; CHECK: attributes #[[ATTR0]] = { sanitize_address "amdgpu-agpr-alloc"="0" "amdgpu-no-cluster-id-x" "amdgpu-no-cluster-id-y" "amdgpu-no-cluster-id-z" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "target-cpu"="gfx90a" "uniform-work-group-size"="false" } +;. diff --git a/llvm/test/CodeGen/AMDGPU/annotate-kernel-features-hsa-call.ll b/llvm/test/CodeGen/AMDGPU/annotate-kernel-features-hsa-call.ll index a688b6f..fb566e5 100644 --- a/llvm/test/CodeGen/AMDGPU/annotate-kernel-features-hsa-call.ll +++ b/llvm/test/CodeGen/AMDGPU/annotate-kernel-features-hsa-call.ll @@ -707,8 +707,8 @@ attributes #6 = { "enqueued-block" } ; ATTRIBUTOR_HSA: attributes #[[ATTR14]] = { nounwind "amdgpu-agpr-alloc"="0" "amdgpu-no-cluster-id-x" "amdgpu-no-cluster-id-y" "amdgpu-no-cluster-id-z" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-flat-scratch-init" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "target-cpu"="gfx900" "uniform-work-group-size"="false" } ; ATTRIBUTOR_HSA: attributes #[[ATTR15:[0-9]+]] = { nounwind "uniform-work-group-size"="false" } ; ATTRIBUTOR_HSA: attributes #[[ATTR16]] = { nounwind "amdgpu-agpr-alloc"="0" "amdgpu-no-cluster-id-x" "amdgpu-no-cluster-id-y" "amdgpu-no-cluster-id-z" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-flat-scratch-init" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" } -; ATTRIBUTOR_HSA: attributes #[[ATTR17]] = { nounwind sanitize_address "amdgpu-agpr-alloc"="0" "amdgpu-no-cluster-id-x" "amdgpu-no-cluster-id-y" "amdgpu-no-cluster-id-z" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-flat-scratch-init" "amdgpu-no-heap-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" 
"amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" } -; ATTRIBUTOR_HSA: attributes #[[ATTR18]] = { nounwind "amdgpu-agpr-alloc"="0" "amdgpu-no-cluster-id-x" "amdgpu-no-cluster-id-y" "amdgpu-no-cluster-id-z" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-flat-scratch-init" "amdgpu-no-heap-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" } +; ATTRIBUTOR_HSA: attributes #[[ATTR17]] = { nounwind sanitize_address "amdgpu-agpr-alloc"="0" "amdgpu-no-cluster-id-x" "amdgpu-no-cluster-id-y" "amdgpu-no-cluster-id-z" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" } +; ATTRIBUTOR_HSA: attributes #[[ATTR18]] = { nounwind "amdgpu-agpr-alloc"="0" "amdgpu-no-cluster-id-x" "amdgpu-no-cluster-id-y" "amdgpu-no-cluster-id-z" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" } ; ATTRIBUTOR_HSA: attributes #[[ATTR19:[0-9]+]] = { nounwind sanitize_address "amdgpu-no-implicitarg-ptr" "uniform-work-group-size"="false" } ; ATTRIBUTOR_HSA: attributes #[[ATTR20:[0-9]+]] = { "enqueued-block" "uniform-work-group-size"="false" } ; ATTRIBUTOR_HSA: attributes #[[ATTR21]] = { "amdgpu-agpr-alloc"="0" "amdgpu-no-cluster-id-x" "amdgpu-no-cluster-id-y" "amdgpu-no-cluster-id-z" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-flat-scratch-init" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "enqueued-block" "uniform-work-group-size"="false" } diff --git a/llvm/test/CodeGen/AMDGPU/atomics-system-scope.ll b/llvm/test/CodeGen/AMDGPU/atomics-system-scope.ll index ef52694..54871a6 100644 --- a/llvm/test/CodeGen/AMDGPU/atomics-system-scope.ll +++ b/llvm/test/CodeGen/AMDGPU/atomics-system-scope.ll @@ -538,58 +538,61 @@ define double @flat_system_atomic_fadd_f64(ptr %ptr, double %val) { ; GFX1250: ; %bb.0: ; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 ; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0 ; GFX1250-NEXT: s_mov_b64 s[0:1], src_shared_base ; GFX1250-NEXT: s_mov_b32 s0, exec_lo -; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5 -; GFX1250-NEXT: v_cmpx_ne_u32_e64 s1, v1 +; GFX1250-NEXT: ; implicit-def: $vgpr0_vgpr1 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-NEXT: v_cmpx_ne_u32_e64 s1, v5 ; GFX1250-NEXT: s_xor_b32 s0, exec_lo, s0 -; 
GFX1250-NEXT: s_cbranch_execz .LBB34_6 -; GFX1250-NEXT: ; %bb.1: ; %atomicrmw.check.private -; GFX1250-NEXT: s_mov_b32 s1, src_flat_scratch_base_hi -; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-NEXT: v_xor_b32_e32 v4, s1, v1 -; GFX1250-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v4 -; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5 +; GFX1250-NEXT: s_cbranch_execnz .LBB34_3 +; GFX1250-NEXT: ; %bb.1: ; %Flow2 +; GFX1250-NEXT: s_and_not1_saveexec_b32 s0, s0 +; GFX1250-NEXT: s_cbranch_execnz .LBB34_8 +; GFX1250-NEXT: .LBB34_2: ; %atomicrmw.phi +; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0 +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] +; GFX1250-NEXT: .LBB34_3: ; %atomicrmw.check.private +; GFX1250-NEXT: v_xor_b32_e32 v0, src_flat_scratch_base_hi, v5 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) +; GFX1250-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0 +; GFX1250-NEXT: ; implicit-def: $vgpr0_vgpr1 ; GFX1250-NEXT: s_and_saveexec_b32 s1, vcc_lo -; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1250-NEXT: s_xor_b32 s1, exec_lo, s1 -; GFX1250-NEXT: s_cbranch_execz .LBB34_3 -; GFX1250-NEXT: ; %bb.2: ; %atomicrmw.global -; GFX1250-NEXT: global_atomic_add_f64 v[4:5], v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS -; GFX1250-NEXT: ; implicit-def: $vgpr0_vgpr1 +; GFX1250-NEXT: s_cbranch_execz .LBB34_5 +; GFX1250-NEXT: ; %bb.4: ; %atomicrmw.global +; GFX1250-NEXT: global_atomic_add_f64 v[0:1], v[4:5], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5 ; GFX1250-NEXT: ; implicit-def: $vgpr2_vgpr3 -; GFX1250-NEXT: .LBB34_3: ; %Flow +; GFX1250-NEXT: .LBB34_5: ; %Flow ; GFX1250-NEXT: s_wait_xcnt 0x0 ; GFX1250-NEXT: s_and_not1_saveexec_b32 s1, s1 -; GFX1250-NEXT: s_cbranch_execz .LBB34_5 -; GFX1250-NEXT: ; %bb.4: ; %atomicrmw.private -; GFX1250-NEXT: s_mov_b32 s2, src_flat_scratch_base_lo -; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] +; GFX1250-NEXT: s_cbranch_execz .LBB34_7 +; GFX1250-NEXT: ; %bb.6: ; %atomicrmw.private +; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5] ; GFX1250-NEXT: s_wait_loadcnt 0x0 -; GFX1250-NEXT: v_subrev_nc_u32_e32 v4, s2, v0 +; GFX1250-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v4 ; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-NEXT: v_cndmask_b32_e32 v6, -1, v4, vcc_lo -; GFX1250-NEXT: scratch_load_b64 v[4:5], v6, off +; GFX1250-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo +; GFX1250-NEXT: scratch_load_b64 v[0:1], v4, off ; GFX1250-NEXT: s_wait_loadcnt 0x0 -; GFX1250-NEXT: v_add_f64_e32 v[0:1], v[4:5], v[2:3] -; GFX1250-NEXT: scratch_store_b64 v6, v[0:1], off -; GFX1250-NEXT: .LBB34_5: ; %Flow1 +; GFX1250-NEXT: v_add_f64_e32 v[2:3], v[0:1], v[2:3] +; GFX1250-NEXT: scratch_store_b64 v4, v[2:3], off +; GFX1250-NEXT: .LBB34_7: ; %Flow1 ; GFX1250-NEXT: s_wait_xcnt 0x0 ; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s1 -; GFX1250-NEXT: ; implicit-def: $vgpr0_vgpr1 +; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5 ; GFX1250-NEXT: ; implicit-def: $vgpr2_vgpr3 -; GFX1250-NEXT: .LBB34_6: ; %Flow2 ; GFX1250-NEXT: s_and_not1_saveexec_b32 s0, s0 -; GFX1250-NEXT: s_cbranch_execz .LBB34_8 -; GFX1250-NEXT: ; %bb.7: ; %atomicrmw.shared -; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] -; GFX1250-NEXT: v_cndmask_b32_e32 v0, -1, v0, vcc_lo +; GFX1250-NEXT: s_cbranch_execz .LBB34_2 +; GFX1250-NEXT: .LBB34_8: ; %atomicrmw.shared +; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5] ; GFX1250-NEXT: 
s_wait_loadcnt 0x0 -; GFX1250-NEXT: ds_add_rtn_f64 v[4:5], v0, v[2:3] -; GFX1250-NEXT: .LBB34_8: ; %atomicrmw.phi +; GFX1250-NEXT: v_cndmask_b32_e32 v0, -1, v4, vcc_lo +; GFX1250-NEXT: ds_add_rtn_f64 v[0:1], v0, v[2:3] ; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0 -; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 -; GFX1250-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5 +; GFX1250-NEXT: s_wait_dscnt 0x0 ; GFX1250-NEXT: s_set_pc_i64 s[30:31] %result = atomicrmw fadd ptr %ptr, double %val monotonic ret double %result @@ -600,58 +603,61 @@ define double @flat_one_as_atomic_fadd_f64(ptr %ptr, double %val) { ; GFX1250: ; %bb.0: ; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 ; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0 ; GFX1250-NEXT: s_mov_b64 s[0:1], src_shared_base ; GFX1250-NEXT: s_mov_b32 s0, exec_lo -; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5 -; GFX1250-NEXT: v_cmpx_ne_u32_e64 s1, v1 +; GFX1250-NEXT: ; implicit-def: $vgpr0_vgpr1 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-NEXT: v_cmpx_ne_u32_e64 s1, v5 ; GFX1250-NEXT: s_xor_b32 s0, exec_lo, s0 -; GFX1250-NEXT: s_cbranch_execz .LBB35_6 -; GFX1250-NEXT: ; %bb.1: ; %atomicrmw.check.private -; GFX1250-NEXT: s_mov_b32 s1, src_flat_scratch_base_hi -; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-NEXT: v_xor_b32_e32 v4, s1, v1 -; GFX1250-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v4 -; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5 +; GFX1250-NEXT: s_cbranch_execnz .LBB35_3 +; GFX1250-NEXT: ; %bb.1: ; %Flow2 +; GFX1250-NEXT: s_and_not1_saveexec_b32 s0, s0 +; GFX1250-NEXT: s_cbranch_execnz .LBB35_8 +; GFX1250-NEXT: .LBB35_2: ; %atomicrmw.phi +; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0 +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] +; GFX1250-NEXT: .LBB35_3: ; %atomicrmw.check.private +; GFX1250-NEXT: v_xor_b32_e32 v0, src_flat_scratch_base_hi, v5 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) +; GFX1250-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0 +; GFX1250-NEXT: ; implicit-def: $vgpr0_vgpr1 ; GFX1250-NEXT: s_and_saveexec_b32 s1, vcc_lo -; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1250-NEXT: s_xor_b32 s1, exec_lo, s1 -; GFX1250-NEXT: s_cbranch_execz .LBB35_3 -; GFX1250-NEXT: ; %bb.2: ; %atomicrmw.global -; GFX1250-NEXT: global_atomic_add_f64 v[4:5], v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS -; GFX1250-NEXT: ; implicit-def: $vgpr0_vgpr1 +; GFX1250-NEXT: s_cbranch_execz .LBB35_5 +; GFX1250-NEXT: ; %bb.4: ; %atomicrmw.global +; GFX1250-NEXT: global_atomic_add_f64 v[0:1], v[4:5], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5 ; GFX1250-NEXT: ; implicit-def: $vgpr2_vgpr3 -; GFX1250-NEXT: .LBB35_3: ; %Flow +; GFX1250-NEXT: .LBB35_5: ; %Flow ; GFX1250-NEXT: s_wait_xcnt 0x0 ; GFX1250-NEXT: s_and_not1_saveexec_b32 s1, s1 -; GFX1250-NEXT: s_cbranch_execz .LBB35_5 -; GFX1250-NEXT: ; %bb.4: ; %atomicrmw.private -; GFX1250-NEXT: s_mov_b32 s2, src_flat_scratch_base_lo -; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] +; GFX1250-NEXT: s_cbranch_execz .LBB35_7 +; GFX1250-NEXT: ; %bb.6: ; %atomicrmw.private +; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5] ; GFX1250-NEXT: s_wait_loadcnt 0x0 -; GFX1250-NEXT: v_subrev_nc_u32_e32 v4, s2, v0 +; GFX1250-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v4 ; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-NEXT: v_cndmask_b32_e32 v6, -1, v4, vcc_lo 
-; GFX1250-NEXT: scratch_load_b64 v[4:5], v6, off +; GFX1250-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo +; GFX1250-NEXT: scratch_load_b64 v[0:1], v4, off ; GFX1250-NEXT: s_wait_loadcnt 0x0 -; GFX1250-NEXT: v_add_f64_e32 v[0:1], v[4:5], v[2:3] -; GFX1250-NEXT: scratch_store_b64 v6, v[0:1], off -; GFX1250-NEXT: .LBB35_5: ; %Flow1 +; GFX1250-NEXT: v_add_f64_e32 v[2:3], v[0:1], v[2:3] +; GFX1250-NEXT: scratch_store_b64 v4, v[2:3], off +; GFX1250-NEXT: .LBB35_7: ; %Flow1 ; GFX1250-NEXT: s_wait_xcnt 0x0 ; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s1 -; GFX1250-NEXT: ; implicit-def: $vgpr0_vgpr1 +; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5 ; GFX1250-NEXT: ; implicit-def: $vgpr2_vgpr3 -; GFX1250-NEXT: .LBB35_6: ; %Flow2 ; GFX1250-NEXT: s_and_not1_saveexec_b32 s0, s0 -; GFX1250-NEXT: s_cbranch_execz .LBB35_8 -; GFX1250-NEXT: ; %bb.7: ; %atomicrmw.shared -; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] -; GFX1250-NEXT: v_cndmask_b32_e32 v0, -1, v0, vcc_lo +; GFX1250-NEXT: s_cbranch_execz .LBB35_2 +; GFX1250-NEXT: .LBB35_8: ; %atomicrmw.shared +; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5] ; GFX1250-NEXT: s_wait_loadcnt 0x0 -; GFX1250-NEXT: ds_add_rtn_f64 v[4:5], v0, v[2:3] -; GFX1250-NEXT: .LBB35_8: ; %atomicrmw.phi +; GFX1250-NEXT: v_cndmask_b32_e32 v0, -1, v4, vcc_lo +; GFX1250-NEXT: ds_add_rtn_f64 v[0:1], v0, v[2:3] ; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0 -; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 -; GFX1250-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5 +; GFX1250-NEXT: s_wait_dscnt 0x0 ; GFX1250-NEXT: s_set_pc_i64 s[30:31] %result = atomicrmw fadd ptr %ptr, double %val syncscope("one-as") monotonic ret double %result @@ -686,40 +692,42 @@ define double @flat_system_atomic_fmin_f64(ptr %ptr, double %val) { ; GFX1250: ; %bb.0: ; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 ; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi -; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-NEXT: v_xor_b32_e32 v4, s0, v1 -; GFX1250-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v4 -; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5 +; GFX1250-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_xor_b32_e32 v0, src_flat_scratch_base_hi, v5 +; GFX1250-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0 +; GFX1250-NEXT: ; implicit-def: $vgpr0_vgpr1 ; GFX1250-NEXT: s_and_saveexec_b32 s0, vcc_lo ; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1250-NEXT: s_xor_b32 s0, exec_lo, s0 -; GFX1250-NEXT: s_cbranch_execz .LBB38_2 -; GFX1250-NEXT: ; %bb.1: ; %atomicrmw.global -; GFX1250-NEXT: flat_atomic_min_num_f64 v[4:5], v[0:1], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_SYS -; GFX1250-NEXT: ; implicit-def: $vgpr0_vgpr1 +; GFX1250-NEXT: s_cbranch_execnz .LBB38_3 +; GFX1250-NEXT: ; %bb.1: ; %Flow +; GFX1250-NEXT: s_and_not1_saveexec_b32 s0, s0 +; GFX1250-NEXT: s_cbranch_execnz .LBB38_4 +; GFX1250-NEXT: .LBB38_2: ; %atomicrmw.phi +; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0 +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] +; GFX1250-NEXT: .LBB38_3: ; %atomicrmw.global +; GFX1250-NEXT: flat_atomic_min_num_f64 v[0:1], v[4:5], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5 ; GFX1250-NEXT: ; implicit-def: $vgpr2_vgpr3 -; GFX1250-NEXT: .LBB38_2: ; %Flow ; GFX1250-NEXT: s_wait_xcnt 0x0 ; GFX1250-NEXT: s_and_not1_saveexec_b32 s0, s0 -; GFX1250-NEXT: s_cbranch_execz 
.LBB38_4 -; GFX1250-NEXT: ; %bb.3: ; %atomicrmw.private -; GFX1250-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo -; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] +; GFX1250-NEXT: s_cbranch_execz .LBB38_2 +; GFX1250-NEXT: .LBB38_4: ; %atomicrmw.private +; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5] ; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 -; GFX1250-NEXT: v_subrev_nc_u32_e32 v4, s1, v0 +; GFX1250-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v4 ; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1) -; GFX1250-NEXT: v_dual_max_num_f64 v[2:3], v[2:3], v[2:3] :: v_dual_cndmask_b32 v6, -1, v4, vcc_lo -; GFX1250-NEXT: scratch_load_b64 v[4:5], v6, off +; GFX1250-NEXT: v_dual_max_num_f64 v[2:3], v[2:3], v[2:3] :: v_dual_cndmask_b32 v6, -1, v0, vcc_lo +; GFX1250-NEXT: scratch_load_b64 v[0:1], v6, off ; GFX1250-NEXT: s_wait_loadcnt 0x0 -; GFX1250-NEXT: v_max_num_f64_e32 v[0:1], v[4:5], v[4:5] -; GFX1250-NEXT: v_min_num_f64_e32 v[0:1], v[0:1], v[2:3] -; GFX1250-NEXT: scratch_store_b64 v6, v[0:1], off -; GFX1250-NEXT: .LBB38_4: ; %atomicrmw.phi +; GFX1250-NEXT: v_max_num_f64_e32 v[4:5], v[0:1], v[0:1] +; GFX1250-NEXT: v_min_num_f64_e32 v[2:3], v[4:5], v[2:3] +; GFX1250-NEXT: scratch_store_b64 v6, v[2:3], off ; GFX1250-NEXT: s_wait_xcnt 0x0 ; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0 -; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 -; GFX1250-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5 ; GFX1250-NEXT: s_set_pc_i64 s[30:31] %result = atomicrmw fmin ptr %ptr, double %val monotonic ret double %result @@ -730,40 +738,42 @@ define double @flat_one_as_atomic_fmin_f64(ptr %ptr, double %val) { ; GFX1250: ; %bb.0: ; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 ; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi -; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-NEXT: v_xor_b32_e32 v4, s0, v1 -; GFX1250-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v4 -; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5 +; GFX1250-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_xor_b32_e32 v0, src_flat_scratch_base_hi, v5 +; GFX1250-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0 +; GFX1250-NEXT: ; implicit-def: $vgpr0_vgpr1 ; GFX1250-NEXT: s_and_saveexec_b32 s0, vcc_lo ; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1250-NEXT: s_xor_b32 s0, exec_lo, s0 -; GFX1250-NEXT: s_cbranch_execz .LBB39_2 -; GFX1250-NEXT: ; %bb.1: ; %atomicrmw.global -; GFX1250-NEXT: flat_atomic_min_num_f64 v[4:5], v[0:1], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_SYS -; GFX1250-NEXT: ; implicit-def: $vgpr0_vgpr1 +; GFX1250-NEXT: s_cbranch_execnz .LBB39_3 +; GFX1250-NEXT: ; %bb.1: ; %Flow +; GFX1250-NEXT: s_and_not1_saveexec_b32 s0, s0 +; GFX1250-NEXT: s_cbranch_execnz .LBB39_4 +; GFX1250-NEXT: .LBB39_2: ; %atomicrmw.phi +; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0 +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] +; GFX1250-NEXT: .LBB39_3: ; %atomicrmw.global +; GFX1250-NEXT: flat_atomic_min_num_f64 v[0:1], v[4:5], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5 ; GFX1250-NEXT: ; implicit-def: $vgpr2_vgpr3 -; GFX1250-NEXT: .LBB39_2: ; %Flow ; GFX1250-NEXT: s_wait_xcnt 0x0 ; GFX1250-NEXT: s_and_not1_saveexec_b32 s0, s0 -; GFX1250-NEXT: s_cbranch_execz .LBB39_4 -; GFX1250-NEXT: ; %bb.3: ; %atomicrmw.private -; GFX1250-NEXT: s_mov_b32 s1, 
src_flat_scratch_base_lo -; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] +; GFX1250-NEXT: s_cbranch_execz .LBB39_2 +; GFX1250-NEXT: .LBB39_4: ; %atomicrmw.private +; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5] ; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 -; GFX1250-NEXT: v_subrev_nc_u32_e32 v4, s1, v0 +; GFX1250-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v4 ; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1) -; GFX1250-NEXT: v_dual_max_num_f64 v[2:3], v[2:3], v[2:3] :: v_dual_cndmask_b32 v6, -1, v4, vcc_lo -; GFX1250-NEXT: scratch_load_b64 v[4:5], v6, off +; GFX1250-NEXT: v_dual_max_num_f64 v[2:3], v[2:3], v[2:3] :: v_dual_cndmask_b32 v6, -1, v0, vcc_lo +; GFX1250-NEXT: scratch_load_b64 v[0:1], v6, off ; GFX1250-NEXT: s_wait_loadcnt 0x0 -; GFX1250-NEXT: v_max_num_f64_e32 v[0:1], v[4:5], v[4:5] -; GFX1250-NEXT: v_min_num_f64_e32 v[0:1], v[0:1], v[2:3] -; GFX1250-NEXT: scratch_store_b64 v6, v[0:1], off -; GFX1250-NEXT: .LBB39_4: ; %atomicrmw.phi +; GFX1250-NEXT: v_max_num_f64_e32 v[4:5], v[0:1], v[0:1] +; GFX1250-NEXT: v_min_num_f64_e32 v[2:3], v[4:5], v[2:3] +; GFX1250-NEXT: scratch_store_b64 v6, v[2:3], off ; GFX1250-NEXT: s_wait_xcnt 0x0 ; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0 -; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 -; GFX1250-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5 ; GFX1250-NEXT: s_set_pc_i64 s[30:31] %result = atomicrmw fmin ptr %ptr, double %val syncscope("one-as") monotonic ret double %result @@ -798,40 +808,42 @@ define double @flat_system_atomic_fmax_f64(ptr %ptr, double %val) { ; GFX1250: ; %bb.0: ; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 ; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi -; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-NEXT: v_xor_b32_e32 v4, s0, v1 -; GFX1250-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v4 -; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5 +; GFX1250-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_xor_b32_e32 v0, src_flat_scratch_base_hi, v5 +; GFX1250-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0 +; GFX1250-NEXT: ; implicit-def: $vgpr0_vgpr1 ; GFX1250-NEXT: s_and_saveexec_b32 s0, vcc_lo ; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1250-NEXT: s_xor_b32 s0, exec_lo, s0 -; GFX1250-NEXT: s_cbranch_execz .LBB42_2 -; GFX1250-NEXT: ; %bb.1: ; %atomicrmw.global -; GFX1250-NEXT: flat_atomic_max_num_f64 v[4:5], v[0:1], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_SYS -; GFX1250-NEXT: ; implicit-def: $vgpr0_vgpr1 +; GFX1250-NEXT: s_cbranch_execnz .LBB42_3 +; GFX1250-NEXT: ; %bb.1: ; %Flow +; GFX1250-NEXT: s_and_not1_saveexec_b32 s0, s0 +; GFX1250-NEXT: s_cbranch_execnz .LBB42_4 +; GFX1250-NEXT: .LBB42_2: ; %atomicrmw.phi +; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0 +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] +; GFX1250-NEXT: .LBB42_3: ; %atomicrmw.global +; GFX1250-NEXT: flat_atomic_max_num_f64 v[0:1], v[4:5], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5 ; GFX1250-NEXT: ; implicit-def: $vgpr2_vgpr3 -; GFX1250-NEXT: .LBB42_2: ; %Flow ; GFX1250-NEXT: s_wait_xcnt 0x0 ; GFX1250-NEXT: s_and_not1_saveexec_b32 s0, s0 -; GFX1250-NEXT: s_cbranch_execz .LBB42_4 -; GFX1250-NEXT: ; %bb.3: ; %atomicrmw.private -; GFX1250-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo -; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] +; 
GFX1250-NEXT: s_cbranch_execz .LBB42_2 +; GFX1250-NEXT: .LBB42_4: ; %atomicrmw.private +; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5] ; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 -; GFX1250-NEXT: v_subrev_nc_u32_e32 v4, s1, v0 +; GFX1250-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v4 ; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1) -; GFX1250-NEXT: v_dual_max_num_f64 v[2:3], v[2:3], v[2:3] :: v_dual_cndmask_b32 v6, -1, v4, vcc_lo -; GFX1250-NEXT: scratch_load_b64 v[4:5], v6, off +; GFX1250-NEXT: v_dual_max_num_f64 v[2:3], v[2:3], v[2:3] :: v_dual_cndmask_b32 v6, -1, v0, vcc_lo +; GFX1250-NEXT: scratch_load_b64 v[0:1], v6, off ; GFX1250-NEXT: s_wait_loadcnt 0x0 -; GFX1250-NEXT: v_max_num_f64_e32 v[0:1], v[4:5], v[4:5] -; GFX1250-NEXT: v_max_num_f64_e32 v[0:1], v[0:1], v[2:3] -; GFX1250-NEXT: scratch_store_b64 v6, v[0:1], off -; GFX1250-NEXT: .LBB42_4: ; %atomicrmw.phi +; GFX1250-NEXT: v_max_num_f64_e32 v[4:5], v[0:1], v[0:1] +; GFX1250-NEXT: v_max_num_f64_e32 v[2:3], v[4:5], v[2:3] +; GFX1250-NEXT: scratch_store_b64 v6, v[2:3], off ; GFX1250-NEXT: s_wait_xcnt 0x0 ; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0 -; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 -; GFX1250-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5 ; GFX1250-NEXT: s_set_pc_i64 s[30:31] %result = atomicrmw fmax ptr %ptr, double %val monotonic ret double %result @@ -842,40 +854,42 @@ define double @flat_one_as_atomic_fmax_f64(ptr %ptr, double %val) { ; GFX1250: ; %bb.0: ; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 ; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi -; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-NEXT: v_xor_b32_e32 v4, s0, v1 -; GFX1250-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v4 -; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5 +; GFX1250-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_xor_b32_e32 v0, src_flat_scratch_base_hi, v5 +; GFX1250-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0 +; GFX1250-NEXT: ; implicit-def: $vgpr0_vgpr1 ; GFX1250-NEXT: s_and_saveexec_b32 s0, vcc_lo ; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1250-NEXT: s_xor_b32 s0, exec_lo, s0 -; GFX1250-NEXT: s_cbranch_execz .LBB43_2 -; GFX1250-NEXT: ; %bb.1: ; %atomicrmw.global -; GFX1250-NEXT: flat_atomic_max_num_f64 v[4:5], v[0:1], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_SYS -; GFX1250-NEXT: ; implicit-def: $vgpr0_vgpr1 +; GFX1250-NEXT: s_cbranch_execnz .LBB43_3 +; GFX1250-NEXT: ; %bb.1: ; %Flow +; GFX1250-NEXT: s_and_not1_saveexec_b32 s0, s0 +; GFX1250-NEXT: s_cbranch_execnz .LBB43_4 +; GFX1250-NEXT: .LBB43_2: ; %atomicrmw.phi +; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0 +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] +; GFX1250-NEXT: .LBB43_3: ; %atomicrmw.global +; GFX1250-NEXT: flat_atomic_max_num_f64 v[0:1], v[4:5], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5 ; GFX1250-NEXT: ; implicit-def: $vgpr2_vgpr3 -; GFX1250-NEXT: .LBB43_2: ; %Flow ; GFX1250-NEXT: s_wait_xcnt 0x0 ; GFX1250-NEXT: s_and_not1_saveexec_b32 s0, s0 -; GFX1250-NEXT: s_cbranch_execz .LBB43_4 -; GFX1250-NEXT: ; %bb.3: ; %atomicrmw.private -; GFX1250-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo -; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] +; GFX1250-NEXT: s_cbranch_execz .LBB43_2 +; GFX1250-NEXT: .LBB43_4: ; %atomicrmw.private +; 
GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5] ; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 -; GFX1250-NEXT: v_subrev_nc_u32_e32 v4, s1, v0 +; GFX1250-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v4 ; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1) -; GFX1250-NEXT: v_dual_max_num_f64 v[2:3], v[2:3], v[2:3] :: v_dual_cndmask_b32 v6, -1, v4, vcc_lo -; GFX1250-NEXT: scratch_load_b64 v[4:5], v6, off +; GFX1250-NEXT: v_dual_max_num_f64 v[2:3], v[2:3], v[2:3] :: v_dual_cndmask_b32 v6, -1, v0, vcc_lo +; GFX1250-NEXT: scratch_load_b64 v[0:1], v6, off ; GFX1250-NEXT: s_wait_loadcnt 0x0 -; GFX1250-NEXT: v_max_num_f64_e32 v[0:1], v[4:5], v[4:5] -; GFX1250-NEXT: v_max_num_f64_e32 v[0:1], v[0:1], v[2:3] -; GFX1250-NEXT: scratch_store_b64 v6, v[0:1], off -; GFX1250-NEXT: .LBB43_4: ; %atomicrmw.phi +; GFX1250-NEXT: v_max_num_f64_e32 v[4:5], v[0:1], v[0:1] +; GFX1250-NEXT: v_max_num_f64_e32 v[2:3], v[4:5], v[2:3] +; GFX1250-NEXT: scratch_store_b64 v6, v[2:3], off ; GFX1250-NEXT: s_wait_xcnt 0x0 ; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0 -; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 -; GFX1250-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5 ; GFX1250-NEXT: s_set_pc_i64 s[30:31] %result = atomicrmw fmax ptr %ptr, double %val syncscope("one-as") monotonic ret double %result @@ -982,13 +996,11 @@ define i64 @flat_one_as_atomic_min_i64(ptr %ptr, i64 %val) { ; GFX1250: ; %bb.0: ; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 ; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi -; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-NEXT: v_xor_b32_e32 v4, s0, v1 +; GFX1250-NEXT: v_xor_b32_e32 v4, src_flat_scratch_base_hi, v1 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) ; GFX1250-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v4 ; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5 ; GFX1250-NEXT: s_and_saveexec_b32 s0, vcc_lo -; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1250-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-NEXT: s_cbranch_execz .LBB52_2 ; GFX1250-NEXT: ; %bb.1: ; %atomicrmw.global @@ -1000,10 +1012,9 @@ define i64 @flat_one_as_atomic_min_i64(ptr %ptr, i64 %val) { ; GFX1250-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-NEXT: s_cbranch_execz .LBB52_4 ; GFX1250-NEXT: ; %bb.3: ; %atomicrmw.private -; GFX1250-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] ; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 -; GFX1250-NEXT: v_subrev_nc_u32_e32 v4, s1, v0 +; GFX1250-NEXT: v_subrev_nc_u32_e32 v4, src_flat_scratch_base_lo, v0 ; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-NEXT: v_cndmask_b32_e32 v6, -1, v4, vcc_lo ; GFX1250-NEXT: scratch_load_b64 v[4:5], v6, off @@ -1025,13 +1036,11 @@ define i64 @flat_system_atomic_min_i64(ptr %ptr, i64 %val) { ; GFX1250: ; %bb.0: ; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 ; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi -; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-NEXT: v_xor_b32_e32 v4, s0, v1 +; GFX1250-NEXT: v_xor_b32_e32 v4, src_flat_scratch_base_hi, v1 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) ; GFX1250-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v4 ; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5 ; GFX1250-NEXT: s_and_saveexec_b32 s0, vcc_lo -; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1250-NEXT: s_xor_b32 
s0, exec_lo, s0 ; GFX1250-NEXT: s_cbranch_execz .LBB53_2 ; GFX1250-NEXT: ; %bb.1: ; %atomicrmw.global @@ -1043,10 +1052,9 @@ define i64 @flat_system_atomic_min_i64(ptr %ptr, i64 %val) { ; GFX1250-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-NEXT: s_cbranch_execz .LBB53_4 ; GFX1250-NEXT: ; %bb.3: ; %atomicrmw.private -; GFX1250-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] ; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 -; GFX1250-NEXT: v_subrev_nc_u32_e32 v4, s1, v0 +; GFX1250-NEXT: v_subrev_nc_u32_e32 v4, src_flat_scratch_base_lo, v0 ; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-NEXT: v_cndmask_b32_e32 v6, -1, v4, vcc_lo ; GFX1250-NEXT: scratch_load_b64 v[4:5], v6, off @@ -1068,13 +1076,11 @@ define i64 @flat_one_as_atomic_max_i64(ptr %ptr, i64 %val) { ; GFX1250: ; %bb.0: ; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 ; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi -; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-NEXT: v_xor_b32_e32 v4, s0, v1 +; GFX1250-NEXT: v_xor_b32_e32 v4, src_flat_scratch_base_hi, v1 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) ; GFX1250-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v4 ; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5 ; GFX1250-NEXT: s_and_saveexec_b32 s0, vcc_lo -; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1250-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-NEXT: s_cbranch_execz .LBB54_2 ; GFX1250-NEXT: ; %bb.1: ; %atomicrmw.global @@ -1086,10 +1092,9 @@ define i64 @flat_one_as_atomic_max_i64(ptr %ptr, i64 %val) { ; GFX1250-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-NEXT: s_cbranch_execz .LBB54_4 ; GFX1250-NEXT: ; %bb.3: ; %atomicrmw.private -; GFX1250-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] ; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 -; GFX1250-NEXT: v_subrev_nc_u32_e32 v4, s1, v0 +; GFX1250-NEXT: v_subrev_nc_u32_e32 v4, src_flat_scratch_base_lo, v0 ; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-NEXT: v_cndmask_b32_e32 v6, -1, v4, vcc_lo ; GFX1250-NEXT: scratch_load_b64 v[4:5], v6, off @@ -1111,13 +1116,11 @@ define i64 @flat_system_atomic_max_i64(ptr %ptr, i64 %val) { ; GFX1250: ; %bb.0: ; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 ; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi -; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-NEXT: v_xor_b32_e32 v4, s0, v1 +; GFX1250-NEXT: v_xor_b32_e32 v4, src_flat_scratch_base_hi, v1 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) ; GFX1250-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v4 ; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5 ; GFX1250-NEXT: s_and_saveexec_b32 s0, vcc_lo -; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1250-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-NEXT: s_cbranch_execz .LBB55_2 ; GFX1250-NEXT: ; %bb.1: ; %atomicrmw.global @@ -1129,10 +1132,9 @@ define i64 @flat_system_atomic_max_i64(ptr %ptr, i64 %val) { ; GFX1250-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-NEXT: s_cbranch_execz .LBB55_4 ; GFX1250-NEXT: ; %bb.3: ; %atomicrmw.private -; GFX1250-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] ; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 -; GFX1250-NEXT: v_subrev_nc_u32_e32 v4, s1, v0 +; GFX1250-NEXT: v_subrev_nc_u32_e32 v4, src_flat_scratch_base_lo, v0 ; 
GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-NEXT: v_cndmask_b32_e32 v6, -1, v4, vcc_lo ; GFX1250-NEXT: scratch_load_b64 v[4:5], v6, off @@ -1154,13 +1156,11 @@ define i64 @flat_one_as_atomic_umin_i64(ptr %ptr, i64 %val) { ; GFX1250: ; %bb.0: ; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 ; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi -; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-NEXT: v_xor_b32_e32 v4, s0, v1 +; GFX1250-NEXT: v_xor_b32_e32 v4, src_flat_scratch_base_hi, v1 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) ; GFX1250-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v4 ; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5 ; GFX1250-NEXT: s_and_saveexec_b32 s0, vcc_lo -; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1250-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-NEXT: s_cbranch_execz .LBB56_2 ; GFX1250-NEXT: ; %bb.1: ; %atomicrmw.global @@ -1172,10 +1172,9 @@ define i64 @flat_one_as_atomic_umin_i64(ptr %ptr, i64 %val) { ; GFX1250-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-NEXT: s_cbranch_execz .LBB56_4 ; GFX1250-NEXT: ; %bb.3: ; %atomicrmw.private -; GFX1250-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] ; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 -; GFX1250-NEXT: v_subrev_nc_u32_e32 v4, s1, v0 +; GFX1250-NEXT: v_subrev_nc_u32_e32 v4, src_flat_scratch_base_lo, v0 ; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-NEXT: v_cndmask_b32_e32 v6, -1, v4, vcc_lo ; GFX1250-NEXT: scratch_load_b64 v[4:5], v6, off @@ -1197,13 +1196,11 @@ define i64 @flat_system_atomic_umin_i64(ptr %ptr, i64 %val) { ; GFX1250: ; %bb.0: ; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 ; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi -; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-NEXT: v_xor_b32_e32 v4, s0, v1 +; GFX1250-NEXT: v_xor_b32_e32 v4, src_flat_scratch_base_hi, v1 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) ; GFX1250-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v4 ; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5 ; GFX1250-NEXT: s_and_saveexec_b32 s0, vcc_lo -; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1250-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-NEXT: s_cbranch_execz .LBB57_2 ; GFX1250-NEXT: ; %bb.1: ; %atomicrmw.global @@ -1215,10 +1212,9 @@ define i64 @flat_system_atomic_umin_i64(ptr %ptr, i64 %val) { ; GFX1250-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-NEXT: s_cbranch_execz .LBB57_4 ; GFX1250-NEXT: ; %bb.3: ; %atomicrmw.private -; GFX1250-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] ; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 -; GFX1250-NEXT: v_subrev_nc_u32_e32 v4, s1, v0 +; GFX1250-NEXT: v_subrev_nc_u32_e32 v4, src_flat_scratch_base_lo, v0 ; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-NEXT: v_cndmask_b32_e32 v6, -1, v4, vcc_lo ; GFX1250-NEXT: scratch_load_b64 v[4:5], v6, off @@ -1240,13 +1236,11 @@ define i64 @flat_one_as_atomic_umax_i64(ptr %ptr, i64 %val) { ; GFX1250: ; %bb.0: ; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 ; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi -; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-NEXT: v_xor_b32_e32 v4, s0, v1 +; GFX1250-NEXT: v_xor_b32_e32 v4, src_flat_scratch_base_hi, v1 +; 
GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) ; GFX1250-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v4 ; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5 ; GFX1250-NEXT: s_and_saveexec_b32 s0, vcc_lo -; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1250-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-NEXT: s_cbranch_execz .LBB58_2 ; GFX1250-NEXT: ; %bb.1: ; %atomicrmw.global @@ -1258,10 +1252,9 @@ define i64 @flat_one_as_atomic_umax_i64(ptr %ptr, i64 %val) { ; GFX1250-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-NEXT: s_cbranch_execz .LBB58_4 ; GFX1250-NEXT: ; %bb.3: ; %atomicrmw.private -; GFX1250-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] ; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 -; GFX1250-NEXT: v_subrev_nc_u32_e32 v4, s1, v0 +; GFX1250-NEXT: v_subrev_nc_u32_e32 v4, src_flat_scratch_base_lo, v0 ; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-NEXT: v_cndmask_b32_e32 v6, -1, v4, vcc_lo ; GFX1250-NEXT: scratch_load_b64 v[4:5], v6, off @@ -1283,13 +1276,11 @@ define i64 @flat_system_atomic_umax_i64(ptr %ptr, i64 %val) { ; GFX1250: ; %bb.0: ; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 ; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi -; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-NEXT: v_xor_b32_e32 v4, s0, v1 +; GFX1250-NEXT: v_xor_b32_e32 v4, src_flat_scratch_base_hi, v1 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) ; GFX1250-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v4 ; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5 ; GFX1250-NEXT: s_and_saveexec_b32 s0, vcc_lo -; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1250-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-NEXT: s_cbranch_execz .LBB59_2 ; GFX1250-NEXT: ; %bb.1: ; %atomicrmw.global @@ -1301,10 +1292,9 @@ define i64 @flat_system_atomic_umax_i64(ptr %ptr, i64 %val) { ; GFX1250-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-NEXT: s_cbranch_execz .LBB59_4 ; GFX1250-NEXT: ; %bb.3: ; %atomicrmw.private -; GFX1250-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] ; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 -; GFX1250-NEXT: v_subrev_nc_u32_e32 v4, s1, v0 +; GFX1250-NEXT: v_subrev_nc_u32_e32 v4, src_flat_scratch_base_lo, v0 ; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-NEXT: v_cndmask_b32_e32 v6, -1, v4, vcc_lo ; GFX1250-NEXT: scratch_load_b64 v[4:5], v6, off diff --git a/llvm/test/CodeGen/AMDGPU/bf16.ll b/llvm/test/CodeGen/AMDGPU/bf16.ll index 94ba5cd..6b5647e 100644 --- a/llvm/test/CodeGen/AMDGPU/bf16.ll +++ b/llvm/test/CodeGen/AMDGPU/bf16.ll @@ -569,10 +569,10 @@ define <16 x bfloat> @v_load_global_v16bf16(ptr addrspace(1) %ptr) { ; GFX1250: ; %bb.0: ; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 ; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0 +; GFX1250-NEXT: v_dual_mov_b32 v9, v1 :: v_dual_mov_b32 v8, v0 ; GFX1250-NEXT: s_clause 0x1 -; GFX1250-NEXT: global_load_b128 v[0:3], v[4:5], off -; GFX1250-NEXT: global_load_b128 v[4:7], v[4:5], off offset:16 +; GFX1250-NEXT: global_load_b128 v[0:3], v[8:9], off +; GFX1250-NEXT: global_load_b128 v[4:7], v[8:9], off offset:16 ; GFX1250-NEXT: s_wait_loadcnt 0x0 ; GFX1250-NEXT: s_set_pc_i64 s[30:31] %load = load <16 x bfloat>, ptr addrspace(1) %ptr @@ -752,12 +752,12 @@ define <32 x bfloat> @v_load_global_v32bf16(ptr addrspace(1) %ptr) { ; GFX1250: ; %bb.0: ; GFX1250-NEXT: 
s_wait_loadcnt_dscnt 0x0 ; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: v_dual_mov_b32 v13, v1 :: v_dual_mov_b32 v12, v0 +; GFX1250-NEXT: v_dual_mov_b32 v17, v1 :: v_dual_mov_b32 v16, v0 ; GFX1250-NEXT: s_clause 0x3 -; GFX1250-NEXT: global_load_b128 v[0:3], v[12:13], off -; GFX1250-NEXT: global_load_b128 v[4:7], v[12:13], off offset:16 -; GFX1250-NEXT: global_load_b128 v[8:11], v[12:13], off offset:32 -; GFX1250-NEXT: global_load_b128 v[12:15], v[12:13], off offset:48 +; GFX1250-NEXT: global_load_b128 v[0:3], v[16:17], off +; GFX1250-NEXT: global_load_b128 v[4:7], v[16:17], off offset:16 +; GFX1250-NEXT: global_load_b128 v[8:11], v[16:17], off offset:32 +; GFX1250-NEXT: global_load_b128 v[12:15], v[16:17], off offset:48 ; GFX1250-NEXT: s_wait_loadcnt 0x0 ; GFX1250-NEXT: s_set_pc_i64 s[30:31] %load = load <32 x bfloat>, ptr addrspace(1) %ptr @@ -1055,16 +1055,16 @@ define <64 x bfloat> @v_load_global_v64bf16(ptr addrspace(1) %ptr) { ; GFX1250: ; %bb.0: ; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 ; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: v_dual_mov_b32 v29, v1 :: v_dual_mov_b32 v28, v0 +; GFX1250-NEXT: v_dual_mov_b32 v33, v1 :: v_dual_mov_b32 v32, v0 ; GFX1250-NEXT: s_clause 0x7 -; GFX1250-NEXT: global_load_b128 v[0:3], v[28:29], off -; GFX1250-NEXT: global_load_b128 v[4:7], v[28:29], off offset:16 -; GFX1250-NEXT: global_load_b128 v[8:11], v[28:29], off offset:32 -; GFX1250-NEXT: global_load_b128 v[12:15], v[28:29], off offset:48 -; GFX1250-NEXT: global_load_b128 v[16:19], v[28:29], off offset:64 -; GFX1250-NEXT: global_load_b128 v[20:23], v[28:29], off offset:80 -; GFX1250-NEXT: global_load_b128 v[24:27], v[28:29], off offset:96 -; GFX1250-NEXT: global_load_b128 v[28:31], v[28:29], off offset:112 +; GFX1250-NEXT: global_load_b128 v[0:3], v[32:33], off +; GFX1250-NEXT: global_load_b128 v[4:7], v[32:33], off offset:16 +; GFX1250-NEXT: global_load_b128 v[8:11], v[32:33], off offset:32 +; GFX1250-NEXT: global_load_b128 v[12:15], v[32:33], off offset:48 +; GFX1250-NEXT: global_load_b128 v[16:19], v[32:33], off offset:64 +; GFX1250-NEXT: global_load_b128 v[20:23], v[32:33], off offset:80 +; GFX1250-NEXT: global_load_b128 v[24:27], v[32:33], off offset:96 +; GFX1250-NEXT: global_load_b128 v[28:31], v[32:33], off offset:112 ; GFX1250-NEXT: s_wait_loadcnt 0x0 ; GFX1250-NEXT: s_set_pc_i64 s[30:31] %load = load <64 x bfloat>, ptr addrspace(1) %ptr diff --git a/llvm/test/CodeGen/AMDGPU/branch-relaxation-gfx1250.ll b/llvm/test/CodeGen/AMDGPU/branch-relaxation-gfx1250.ll index 243f0ed..f8655a7 100644 --- a/llvm/test/CodeGen/AMDGPU/branch-relaxation-gfx1250.ll +++ b/llvm/test/CodeGen/AMDGPU/branch-relaxation-gfx1250.ll @@ -256,7 +256,6 @@ define amdgpu_kernel void @uniform_unconditional_min_long_forward_branch(ptr add ; GCN-NEXT: s_wait_storecnt 0x0 ; GCN-NEXT: .LBB5_3: ; %bb4 ; GCN-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 -; GCN-NEXT: s_wait_xcnt 0x0 ; GCN-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 63 ; GCN-NEXT: s_wait_kmcnt 0x0 ; GCN-NEXT: global_store_b32 v0, v1, s[0:1] scope:SCOPE_SYS diff --git a/llvm/test/CodeGen/AMDGPU/calling-conventions.ll b/llvm/test/CodeGen/AMDGPU/calling-conventions.ll index ddd3b152..363a248 100644 --- a/llvm/test/CodeGen/AMDGPU/calling-conventions.ll +++ b/llvm/test/CodeGen/AMDGPU/calling-conventions.ll @@ -2700,142 +2700,142 @@ define amdgpu_kernel void @amd_kernel_v32i8(<32 x i8> %arg0) { ; ; GFX1250-LABEL: amd_kernel_v32i8: ; GFX1250: ; %bb.0: ; %entry -; GFX1250-NEXT: s_load_b256 s[0:7], s[4:5], 0x24 +; GFX1250-NEXT: s_load_b256 s[8:15], s[4:5], 0x24 ; 
GFX1250-NEXT: v_mov_b64_e32 v[8:9], 16 ; GFX1250-NEXT: v_mov_b64_e32 v[10:11], 0 ; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: s_lshr_b32 s16, s0, 16 -; GFX1250-NEXT: s_lshr_b32 s17, s0, 24 -; GFX1250-NEXT: s_lshr_b32 s20, s2, 16 -; GFX1250-NEXT: s_lshr_b32 s21, s2, 24 -; GFX1250-NEXT: s_lshr_b32 s14, s7, 16 -; GFX1250-NEXT: s_lshr_b32 s15, s7, 24 -; GFX1250-NEXT: s_bfe_u32 s27, s7, 0x80008 +; GFX1250-NEXT: s_lshr_b32 s16, s8, 16 +; GFX1250-NEXT: s_lshr_b32 s17, s8, 24 +; GFX1250-NEXT: s_lshr_b32 s6, s15, 16 +; GFX1250-NEXT: s_lshr_b32 s7, s15, 24 +; GFX1250-NEXT: s_bfe_u32 s27, s15, 0x80008 ; GFX1250-NEXT: s_add_co_i32 s17, s17, s17 ; GFX1250-NEXT: s_add_co_i32 s16, s16, s16 -; GFX1250-NEXT: s_lshr_b32 s18, s1, 16 -; GFX1250-NEXT: s_lshr_b32 s19, s1, 24 -; GFX1250-NEXT: s_lshr_b32 s22, s3, 16 -; GFX1250-NEXT: s_lshr_b32 s23, s3, 24 -; GFX1250-NEXT: s_bfe_u32 s29, s1, 0x80008 -; GFX1250-NEXT: s_bfe_u32 s30, s3, 0x80008 -; GFX1250-NEXT: s_add_co_i32 s21, s21, s21 -; GFX1250-NEXT: s_add_co_i32 s20, s20, s20 ; GFX1250-NEXT: s_lshl_b32 s17, s17, 8 ; GFX1250-NEXT: s_and_b32 s16, s16, 0xff -; GFX1250-NEXT: s_add_co_i32 s7, s7, s7 -; GFX1250-NEXT: s_add_co_i32 s27, s27, s27 ; GFX1250-NEXT: s_add_co_i32 s15, s15, s15 -; GFX1250-NEXT: s_add_co_i32 s14, s14, s14 -; GFX1250-NEXT: s_add_co_i32 s3, s3, s3 +; GFX1250-NEXT: s_add_co_i32 s27, s27, s27 +; GFX1250-NEXT: s_add_co_i32 s7, s7, s7 +; GFX1250-NEXT: s_add_co_i32 s6, s6, s6 +; GFX1250-NEXT: s_or_b32 s16, s16, s17 +; GFX1250-NEXT: s_and_b32 s15, s15, 0xff +; GFX1250-NEXT: s_lshl_b32 s17, s27, 8 +; GFX1250-NEXT: s_lshl_b32 s7, s7, 8 +; GFX1250-NEXT: s_and_b32 s6, s6, 0xff +; GFX1250-NEXT: s_or_b32 s15, s15, s17 +; GFX1250-NEXT: s_or_b32 s6, s6, s7 +; GFX1250-NEXT: s_bfe_u32 s26, s14, 0x80008 +; GFX1250-NEXT: s_and_b32 s7, s15, 0xffff +; GFX1250-NEXT: s_lshl_b32 s6, s6, 16 +; GFX1250-NEXT: s_lshr_b32 s20, s10, 16 +; GFX1250-NEXT: s_lshr_b32 s21, s10, 24 +; GFX1250-NEXT: s_lshr_b32 s4, s14, 16 +; GFX1250-NEXT: s_lshr_b32 s5, s14, 24 +; GFX1250-NEXT: s_or_b32 s6, s7, s6 +; GFX1250-NEXT: s_add_co_i32 s7, s14, s14 +; GFX1250-NEXT: s_add_co_i32 s26, s26, s26 +; GFX1250-NEXT: s_lshr_b32 s18, s9, 16 +; GFX1250-NEXT: s_lshr_b32 s19, s9, 24 +; GFX1250-NEXT: s_lshr_b32 s22, s11, 16 +; GFX1250-NEXT: s_lshr_b32 s23, s11, 24 +; GFX1250-NEXT: s_bfe_u32 s29, s9, 0x80008 +; GFX1250-NEXT: s_bfe_u32 s30, s11, 0x80008 +; GFX1250-NEXT: s_add_co_i32 s21, s21, s21 +; GFX1250-NEXT: s_add_co_i32 s20, s20, s20 +; GFX1250-NEXT: s_lshr_b32 s2, s13, 16 +; GFX1250-NEXT: s_lshr_b32 s3, s13, 24 +; GFX1250-NEXT: s_and_b32 s7, s7, 0xff +; GFX1250-NEXT: s_lshl_b32 s14, s26, 8 +; GFX1250-NEXT: s_add_co_i32 s5, s5, s5 +; GFX1250-NEXT: s_add_co_i32 s4, s4, s4 +; GFX1250-NEXT: s_add_co_i32 s11, s11, s11 ; GFX1250-NEXT: s_add_co_i32 s30, s30, s30 ; GFX1250-NEXT: s_add_co_i32 s23, s23, s23 ; GFX1250-NEXT: s_add_co_i32 s22, s22, s22 ; GFX1250-NEXT: s_lshl_b32 s21, s21, 8 ; GFX1250-NEXT: s_and_b32 s20, s20, 0xff -; GFX1250-NEXT: s_add_co_i32 s1, s1, s1 +; GFX1250-NEXT: s_add_co_i32 s9, s9, s9 ; GFX1250-NEXT: s_add_co_i32 s29, s29, s29 ; GFX1250-NEXT: s_add_co_i32 s19, s19, s19 ; GFX1250-NEXT: s_add_co_i32 s18, s18, s18 -; GFX1250-NEXT: s_lshr_b32 s10, s5, 16 -; GFX1250-NEXT: s_lshr_b32 s11, s5, 24 -; GFX1250-NEXT: s_lshr_b32 s12, s6, 16 -; GFX1250-NEXT: s_lshr_b32 s13, s6, 24 -; GFX1250-NEXT: s_or_b32 s16, s16, s17 -; GFX1250-NEXT: s_and_b32 s7, s7, 0xff -; GFX1250-NEXT: s_lshl_b32 s17, s27, 8 -; GFX1250-NEXT: s_lshl_b32 s15, s15, 8 -; GFX1250-NEXT: s_and_b32 s14, s14, 0xff -; 
GFX1250-NEXT: s_and_b32 s3, s3, 0xff +; GFX1250-NEXT: s_bfe_u32 s25, s13, 0x80008 +; GFX1250-NEXT: s_lshl_b32 s5, s5, 8 +; GFX1250-NEXT: s_and_b32 s4, s4, 0xff +; GFX1250-NEXT: s_or_b32 s7, s7, s14 +; GFX1250-NEXT: s_add_co_i32 s3, s3, s3 +; GFX1250-NEXT: s_add_co_i32 s2, s2, s2 +; GFX1250-NEXT: s_and_b32 s11, s11, 0xff ; GFX1250-NEXT: s_lshl_b32 s30, s30, 8 ; GFX1250-NEXT: s_lshl_b32 s23, s23, 8 ; GFX1250-NEXT: s_and_b32 s22, s22, 0xff ; GFX1250-NEXT: s_or_b32 s20, s20, s21 -; GFX1250-NEXT: s_and_b32 s1, s1, 0xff +; GFX1250-NEXT: s_and_b32 s9, s9, 0xff ; GFX1250-NEXT: s_lshl_b32 s21, s29, 8 ; GFX1250-NEXT: s_lshl_b32 s19, s19, 8 ; GFX1250-NEXT: s_and_b32 s18, s18, 0xff -; GFX1250-NEXT: s_lshr_b32 s8, s4, 16 -; GFX1250-NEXT: s_lshr_b32 s9, s4, 24 -; GFX1250-NEXT: s_bfe_u32 s24, s4, 0x80008 -; GFX1250-NEXT: s_bfe_u32 s25, s5, 0x80008 -; GFX1250-NEXT: s_bfe_u32 s26, s6, 0x80008 -; GFX1250-NEXT: s_or_b32 s7, s7, s17 -; GFX1250-NEXT: s_or_b32 s14, s14, s15 -; GFX1250-NEXT: s_add_co_i32 s13, s13, s13 -; GFX1250-NEXT: s_add_co_i32 s12, s12, s12 -; GFX1250-NEXT: s_add_co_i32 s11, s11, s11 -; GFX1250-NEXT: s_add_co_i32 s10, s10, s10 -; GFX1250-NEXT: s_bfe_u32 s28, s0, 0x80008 -; GFX1250-NEXT: s_or_b32 s3, s3, s30 +; GFX1250-NEXT: s_lshr_b32 s0, s12, 16 +; GFX1250-NEXT: s_lshr_b32 s1, s12, 24 +; GFX1250-NEXT: s_bfe_u32 s24, s12, 0x80008 +; GFX1250-NEXT: s_or_b32 s4, s4, s5 +; GFX1250-NEXT: s_and_b32 s5, s7, 0xffff +; GFX1250-NEXT: s_add_co_i32 s7, s13, s13 +; GFX1250-NEXT: s_add_co_i32 s25, s25, s25 +; GFX1250-NEXT: s_lshl_b32 s3, s3, 8 +; GFX1250-NEXT: s_and_b32 s2, s2, 0xff +; GFX1250-NEXT: s_bfe_u32 s28, s8, 0x80008 +; GFX1250-NEXT: s_or_b32 s11, s11, s30 ; GFX1250-NEXT: s_or_b32 s22, s22, s23 -; GFX1250-NEXT: s_bfe_u32 s23, s2, 0x80008 -; GFX1250-NEXT: s_or_b32 s1, s1, s21 +; GFX1250-NEXT: s_bfe_u32 s23, s10, 0x80008 +; GFX1250-NEXT: s_or_b32 s9, s9, s21 ; GFX1250-NEXT: s_or_b32 s18, s18, s19 -; GFX1250-NEXT: s_and_b32 s7, s7, 0xffff -; GFX1250-NEXT: s_lshl_b32 s14, s14, 16 -; GFX1250-NEXT: s_add_co_i32 s6, s6, s6 -; GFX1250-NEXT: s_add_co_i32 s26, s26, s26 -; GFX1250-NEXT: s_lshl_b32 s13, s13, 8 -; GFX1250-NEXT: s_and_b32 s12, s12, 0xff -; GFX1250-NEXT: s_add_co_i32 s5, s5, s5 -; GFX1250-NEXT: s_add_co_i32 s25, s25, s25 -; GFX1250-NEXT: s_lshl_b32 s11, s11, 8 -; GFX1250-NEXT: s_and_b32 s10, s10, 0xff -; GFX1250-NEXT: s_add_co_i32 s4, s4, s4 +; GFX1250-NEXT: s_lshl_b32 s4, s4, 16 +; GFX1250-NEXT: s_and_b32 s7, s7, 0xff +; GFX1250-NEXT: s_lshl_b32 s13, s25, 8 +; GFX1250-NEXT: s_or_b32 s2, s2, s3 +; GFX1250-NEXT: s_add_co_i32 s3, s12, s12 ; GFX1250-NEXT: s_add_co_i32 s24, s24, s24 -; GFX1250-NEXT: s_add_co_i32 s9, s9, s9 -; GFX1250-NEXT: s_add_co_i32 s8, s8, s8 -; GFX1250-NEXT: s_and_b32 s3, s3, 0xffff +; GFX1250-NEXT: s_add_co_i32 s1, s1, s1 +; GFX1250-NEXT: s_add_co_i32 s0, s0, s0 +; GFX1250-NEXT: s_and_b32 s11, s11, 0xffff ; GFX1250-NEXT: s_lshl_b32 s22, s22, 16 -; GFX1250-NEXT: s_add_co_i32 s2, s2, s2 +; GFX1250-NEXT: s_add_co_i32 s10, s10, s10 ; GFX1250-NEXT: s_add_co_i32 s23, s23, s23 -; GFX1250-NEXT: s_and_b32 s1, s1, 0xffff +; GFX1250-NEXT: s_and_b32 s9, s9, 0xffff ; GFX1250-NEXT: s_lshl_b32 s18, s18, 16 -; GFX1250-NEXT: s_add_co_i32 s0, s0, s0 +; GFX1250-NEXT: s_add_co_i32 s8, s8, s8 ; GFX1250-NEXT: s_add_co_i32 s28, s28, s28 -; GFX1250-NEXT: s_or_b32 s7, s7, s14 -; GFX1250-NEXT: s_and_b32 s6, s6, 0xff -; GFX1250-NEXT: s_lshl_b32 s14, s26, 8 -; GFX1250-NEXT: s_or_b32 s12, s12, s13 -; GFX1250-NEXT: s_and_b32 s5, s5, 0xff -; GFX1250-NEXT: s_lshl_b32 s13, s25, 8 -; GFX1250-NEXT: s_or_b32 
s10, s10, s11 -; GFX1250-NEXT: s_and_b32 s4, s4, 0xff -; GFX1250-NEXT: s_lshl_b32 s11, s24, 8 -; GFX1250-NEXT: s_lshl_b32 s9, s9, 8 -; GFX1250-NEXT: s_and_b32 s8, s8, 0xff -; GFX1250-NEXT: s_or_b32 s3, s3, s22 -; GFX1250-NEXT: s_and_b32 s2, s2, 0xff -; GFX1250-NEXT: s_lshl_b32 s22, s23, 8 -; GFX1250-NEXT: s_or_b32 s1, s1, s18 +; GFX1250-NEXT: s_or_b32 s4, s5, s4 +; GFX1250-NEXT: s_or_b32 s5, s7, s13 +; GFX1250-NEXT: s_and_b32 s3, s3, 0xff +; GFX1250-NEXT: s_lshl_b32 s7, s24, 8 +; GFX1250-NEXT: s_lshl_b32 s1, s1, 8 ; GFX1250-NEXT: s_and_b32 s0, s0, 0xff +; GFX1250-NEXT: s_or_b32 s11, s11, s22 +; GFX1250-NEXT: s_and_b32 s10, s10, 0xff +; GFX1250-NEXT: s_lshl_b32 s22, s23, 8 +; GFX1250-NEXT: s_or_b32 s9, s9, s18 +; GFX1250-NEXT: s_and_b32 s8, s8, 0xff ; GFX1250-NEXT: s_lshl_b32 s18, s28, 8 -; GFX1250-NEXT: s_or_b32 s6, s6, s14 -; GFX1250-NEXT: s_or_b32 s5, s5, s13 -; GFX1250-NEXT: s_or_b32 s4, s4, s11 -; GFX1250-NEXT: s_or_b32 s8, s8, s9 -; GFX1250-NEXT: s_or_b32 s2, s2, s22 -; GFX1250-NEXT: s_or_b32 s0, s0, s18 -; GFX1250-NEXT: s_and_b32 s6, s6, 0xffff -; GFX1250-NEXT: s_lshl_b32 s12, s12, 16 +; GFX1250-NEXT: s_or_b32 s3, s3, s7 +; GFX1250-NEXT: s_or_b32 s0, s0, s1 +; GFX1250-NEXT: s_or_b32 s10, s10, s22 +; GFX1250-NEXT: s_or_b32 s8, s8, s18 ; GFX1250-NEXT: s_and_b32 s5, s5, 0xffff -; GFX1250-NEXT: s_and_b32 s4, s4, 0xffff -; GFX1250-NEXT: s_lshl_b32 s8, s8, 16 -; GFX1250-NEXT: s_lshl_b32 s9, s10, 16 -; GFX1250-NEXT: s_and_b32 s2, s2, 0xffff +; GFX1250-NEXT: s_and_b32 s1, s3, 0xffff +; GFX1250-NEXT: s_lshl_b32 s0, s0, 16 +; GFX1250-NEXT: s_lshl_b32 s2, s2, 16 +; GFX1250-NEXT: s_and_b32 s10, s10, 0xffff ; GFX1250-NEXT: s_lshl_b32 s20, s20, 16 -; GFX1250-NEXT: s_and_b32 s0, s0, 0xffff +; GFX1250-NEXT: s_and_b32 s8, s8, 0xffff ; GFX1250-NEXT: s_lshl_b32 s16, s16, 16 -; GFX1250-NEXT: s_or_b32 s6, s6, s12 -; GFX1250-NEXT: s_or_b32 s4, s4, s8 -; GFX1250-NEXT: s_or_b32 s5, s5, s9 -; GFX1250-NEXT: s_or_b32 s2, s2, s20 -; GFX1250-NEXT: s_or_b32 s0, s0, s16 -; GFX1250-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5 -; GFX1250-NEXT: v_dual_mov_b32 v2, s6 :: v_dual_mov_b32 v3, s7 -; GFX1250-NEXT: v_dual_mov_b32 v4, s0 :: v_dual_mov_b32 v5, s1 -; GFX1250-NEXT: v_dual_mov_b32 v6, s2 :: v_dual_mov_b32 v7, s3 +; GFX1250-NEXT: s_or_b32 s0, s1, s0 +; GFX1250-NEXT: s_or_b32 s1, s5, s2 +; GFX1250-NEXT: s_or_b32 s10, s10, s20 +; GFX1250-NEXT: s_or_b32 s8, s8, s16 +; GFX1250-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1 +; GFX1250-NEXT: v_dual_mov_b32 v2, s4 :: v_dual_mov_b32 v3, s6 +; GFX1250-NEXT: v_dual_mov_b32 v4, s8 :: v_dual_mov_b32 v5, s9 +; GFX1250-NEXT: v_dual_mov_b32 v6, s10 :: v_dual_mov_b32 v7, s11 ; GFX1250-NEXT: s_clause 0x1 ; GFX1250-NEXT: global_store_b128 v[8:9], v[0:3], off ; GFX1250-NEXT: global_store_b128 v[10:11], v[4:7], off diff --git a/llvm/test/CodeGen/AMDGPU/carryout-selection.ll b/llvm/test/CodeGen/AMDGPU/carryout-selection.ll index 51652a0..2ae6fc2 100644 --- a/llvm/test/CodeGen/AMDGPU/carryout-selection.ll +++ b/llvm/test/CodeGen/AMDGPU/carryout-selection.ll @@ -117,12 +117,12 @@ define amdgpu_kernel void @sadd64rr(ptr addrspace(1) %out, i64 %a, i64 %b) { ; ; GFX1250-LABEL: sadd64rr: ; GFX1250: ; %bb.0: ; %entry +; GFX1250-NEXT: s_clause 0x1 ; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; GFX1250-NEXT: s_wait_xcnt 0x0 -; GFX1250-NEXT: s_load_b64 s[4:5], s[4:5], 0x34 +; GFX1250-NEXT: s_load_b64 s[6:7], s[4:5], 0x34 ; GFX1250-NEXT: v_mov_b32_e32 v2, 0 ; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: s_add_nc_u64 s[2:3], s[2:3], s[4:5] +; GFX1250-NEXT: s_add_nc_u64 
s[2:3], s[2:3], s[6:7] ; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1250-NEXT: v_mov_b64_e32 v[0:1], s[2:3] ; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[0:1] @@ -818,17 +818,17 @@ define amdgpu_kernel void @suaddo64(ptr addrspace(1) %out, ptr addrspace(1) %car ; ; GFX1250-LABEL: suaddo64: ; GFX1250: ; %bb.0: -; GFX1250-NEXT: s_load_b256 s[0:7], s[4:5], 0x24 +; GFX1250-NEXT: s_load_b256 s[8:15], s[4:5], 0x24 ; GFX1250-NEXT: v_mov_b32_e32 v2, 0 ; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: s_add_nc_u64 s[6:7], s[4:5], s[6:7] -; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_2) -; GFX1250-NEXT: v_cmp_lt_u64_e64 s4, s[6:7], s[4:5] -; GFX1250-NEXT: v_mov_b64_e32 v[0:1], s[6:7] -; GFX1250-NEXT: v_cndmask_b32_e64 v3, 0, 1, s4 +; GFX1250-NEXT: s_add_nc_u64 s[0:1], s[12:13], s[14:15] +; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_mov_b64_e32 v[0:1], s[0:1] +; GFX1250-NEXT: v_cmp_lt_u64_e64 s0, s[0:1], s[12:13] +; GFX1250-NEXT: v_cndmask_b32_e64 v3, 0, 1, s0 ; GFX1250-NEXT: s_clause 0x1 -; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[0:1] -; GFX1250-NEXT: global_store_b8 v2, v3, s[2:3] +; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[8:9] +; GFX1250-NEXT: global_store_b8 v2, v3, s[10:11] ; GFX1250-NEXT: s_endpgm %uadd = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 %a, i64 %b) %val = extractvalue { i64, i1 } %uadd, 0 @@ -1096,12 +1096,12 @@ define amdgpu_kernel void @ssub64rr(ptr addrspace(1) %out, i64 %a, i64 %b) { ; ; GFX1250-LABEL: ssub64rr: ; GFX1250: ; %bb.0: ; %entry +; GFX1250-NEXT: s_clause 0x1 ; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; GFX1250-NEXT: s_wait_xcnt 0x0 -; GFX1250-NEXT: s_load_b64 s[4:5], s[4:5], 0x34 +; GFX1250-NEXT: s_load_b64 s[6:7], s[4:5], 0x34 ; GFX1250-NEXT: v_mov_b32_e32 v2, 0 ; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: s_sub_nc_u64 s[2:3], s[2:3], s[4:5] +; GFX1250-NEXT: s_sub_nc_u64 s[2:3], s[2:3], s[6:7] ; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1250-NEXT: v_mov_b64_e32 v[0:1], s[2:3] ; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[0:1] @@ -1798,17 +1798,17 @@ define amdgpu_kernel void @susubo64(ptr addrspace(1) %out, ptr addrspace(1) %car ; ; GFX1250-LABEL: susubo64: ; GFX1250: ; %bb.0: -; GFX1250-NEXT: s_load_b256 s[0:7], s[4:5], 0x24 +; GFX1250-NEXT: s_load_b256 s[8:15], s[4:5], 0x24 ; GFX1250-NEXT: v_mov_b32_e32 v2, 0 ; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: s_sub_nc_u64 s[6:7], s[4:5], s[6:7] -; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_2) -; GFX1250-NEXT: v_cmp_gt_u64_e64 s4, s[6:7], s[4:5] -; GFX1250-NEXT: v_mov_b64_e32 v[0:1], s[6:7] -; GFX1250-NEXT: v_cndmask_b32_e64 v3, 0, 1, s4 +; GFX1250-NEXT: s_sub_nc_u64 s[0:1], s[12:13], s[14:15] +; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_mov_b64_e32 v[0:1], s[0:1] +; GFX1250-NEXT: v_cmp_gt_u64_e64 s0, s[0:1], s[12:13] +; GFX1250-NEXT: v_cndmask_b32_e64 v3, 0, 1, s0 ; GFX1250-NEXT: s_clause 0x1 -; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[0:1] -; GFX1250-NEXT: global_store_b8 v2, v3, s[2:3] +; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[8:9] +; GFX1250-NEXT: global_store_b8 v2, v3, s[10:11] ; GFX1250-NEXT: s_endpgm %usub = call { i64, i1 } @llvm.usub.with.overflow.i64(i64 %a, i64 %b) %val = extractvalue { i64, i1 } %usub, 0 @@ -3099,70 +3099,70 @@ define amdgpu_kernel void @sudiv64(ptr addrspace(1) %out, i64 %x, i64 %y) { ; ; GFX1250-LABEL: sudiv64: ; 
GFX1250: ; %bb.0: +; GFX1250-NEXT: s_clause 0x1 ; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; GFX1250-NEXT: s_wait_xcnt 0x0 -; GFX1250-NEXT: s_load_b64 s[4:5], s[4:5], 0x34 +; GFX1250-NEXT: s_load_b64 s[6:7], s[4:5], 0x34 ; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: s_or_b64 s[6:7], s[2:3], s[4:5] +; GFX1250-NEXT: s_or_b64 s[4:5], s[2:3], s[6:7] ; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) -; GFX1250-NEXT: s_and_b64 s[6:7], s[6:7], 0xffffffff00000000 -; GFX1250-NEXT: s_cmp_lg_u64 s[6:7], 0 +; GFX1250-NEXT: s_and_b64 s[4:5], s[4:5], 0xffffffff00000000 +; GFX1250-NEXT: s_cmp_lg_u64 s[4:5], 0 ; GFX1250-NEXT: s_cbranch_scc0 .LBB16_4 ; GFX1250-NEXT: ; %bb.1: -; GFX1250-NEXT: s_cvt_f32_u32 s6, s4 -; GFX1250-NEXT: s_cvt_f32_u32 s7, s5 -; GFX1250-NEXT: s_sub_nc_u64 s[10:11], 0, s[4:5] +; GFX1250-NEXT: s_cvt_f32_u32 s4, s6 +; GFX1250-NEXT: s_cvt_f32_u32 s5, s7 +; GFX1250-NEXT: s_sub_nc_u64 s[10:11], 0, s[6:7] ; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_2) | instskip(NEXT) | instid1(SALU_CYCLE_3) -; GFX1250-NEXT: s_fmac_f32 s6, s7, 0x4f800000 -; GFX1250-NEXT: v_s_rcp_f32 s6, s6 +; GFX1250-NEXT: s_fmac_f32 s4, s5, 0x4f800000 +; GFX1250-NEXT: v_s_rcp_f32 s4, s4 ; GFX1250-NEXT: s_delay_alu instid0(TRANS32_DEP_1) | instskip(NEXT) | instid1(SALU_CYCLE_3) -; GFX1250-NEXT: s_mul_f32 s6, s6, 0x5f7ffffc -; GFX1250-NEXT: s_mul_f32 s7, s6, 0x2f800000 +; GFX1250-NEXT: s_mul_f32 s4, s4, 0x5f7ffffc +; GFX1250-NEXT: s_mul_f32 s5, s4, 0x2f800000 ; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_3) | instskip(NEXT) | instid1(SALU_CYCLE_3) -; GFX1250-NEXT: s_trunc_f32 s7, s7 -; GFX1250-NEXT: s_fmac_f32 s6, s7, 0xcf800000 -; GFX1250-NEXT: s_cvt_u32_f32 s9, s7 -; GFX1250-NEXT: s_mov_b32 s7, 0 +; GFX1250-NEXT: s_trunc_f32 s5, s5 +; GFX1250-NEXT: s_fmac_f32 s4, s5, 0xcf800000 +; GFX1250-NEXT: s_cvt_u32_f32 s9, s5 +; GFX1250-NEXT: s_mov_b32 s5, 0 ; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_3) -; GFX1250-NEXT: s_cvt_u32_f32 s8, s6 +; GFX1250-NEXT: s_cvt_u32_f32 s8, s4 ; GFX1250-NEXT: s_mul_u64 s[12:13], s[10:11], s[8:9] ; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1250-NEXT: s_mul_hi_u32 s15, s8, s13 ; GFX1250-NEXT: s_mul_i32 s14, s8, s13 -; GFX1250-NEXT: s_mul_hi_u32 s6, s8, s12 +; GFX1250-NEXT: s_mul_hi_u32 s4, s8, s12 ; GFX1250-NEXT: s_mul_i32 s17, s9, s12 -; GFX1250-NEXT: s_add_nc_u64 s[14:15], s[6:7], s[14:15] +; GFX1250-NEXT: s_add_nc_u64 s[14:15], s[4:5], s[14:15] ; GFX1250-NEXT: s_mul_hi_u32 s16, s9, s12 ; GFX1250-NEXT: s_mul_hi_u32 s18, s9, s13 -; GFX1250-NEXT: s_add_co_u32 s6, s14, s17 -; GFX1250-NEXT: s_add_co_ci_u32 s6, s15, s16 +; GFX1250-NEXT: s_add_co_u32 s4, s14, s17 +; GFX1250-NEXT: s_add_co_ci_u32 s4, s15, s16 ; GFX1250-NEXT: s_mul_i32 s12, s9, s13 ; GFX1250-NEXT: s_add_co_ci_u32 s13, s18, 0 ; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) -; GFX1250-NEXT: s_add_nc_u64 s[12:13], s[6:7], s[12:13] +; GFX1250-NEXT: s_add_nc_u64 s[12:13], s[4:5], s[12:13] ; GFX1250-NEXT: s_add_co_u32 s8, s8, s12 -; GFX1250-NEXT: s_cselect_b32 s6, -1, 0 +; GFX1250-NEXT: s_cselect_b32 s4, -1, 0 ; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) -; GFX1250-NEXT: s_cmp_lg_u32 s6, 0 +; GFX1250-NEXT: s_cmp_lg_u32 s4, 0 ; GFX1250-NEXT: s_add_co_ci_u32 s9, s9, s13 ; GFX1250-NEXT: s_mul_u64 s[10:11], s[10:11], s[8:9] ; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1250-NEXT: s_mul_hi_u32 s13, s8, s11 ; GFX1250-NEXT: s_mul_i32 s12, s8, 
s11 -; GFX1250-NEXT: s_mul_hi_u32 s6, s8, s10 +; GFX1250-NEXT: s_mul_hi_u32 s4, s8, s10 ; GFX1250-NEXT: s_mul_i32 s15, s9, s10 -; GFX1250-NEXT: s_add_nc_u64 s[12:13], s[6:7], s[12:13] +; GFX1250-NEXT: s_add_nc_u64 s[12:13], s[4:5], s[12:13] ; GFX1250-NEXT: s_mul_hi_u32 s14, s9, s10 ; GFX1250-NEXT: s_mul_hi_u32 s16, s9, s11 -; GFX1250-NEXT: s_add_co_u32 s6, s12, s15 -; GFX1250-NEXT: s_add_co_ci_u32 s6, s13, s14 +; GFX1250-NEXT: s_add_co_u32 s4, s12, s15 +; GFX1250-NEXT: s_add_co_ci_u32 s4, s13, s14 ; GFX1250-NEXT: s_mul_i32 s10, s9, s11 ; GFX1250-NEXT: s_add_co_ci_u32 s11, s16, 0 ; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) -; GFX1250-NEXT: s_add_nc_u64 s[10:11], s[6:7], s[10:11] +; GFX1250-NEXT: s_add_nc_u64 s[10:11], s[4:5], s[10:11] ; GFX1250-NEXT: s_add_co_u32 s8, s8, s10 ; GFX1250-NEXT: s_cselect_b32 s10, -1, 0 -; GFX1250-NEXT: s_mul_hi_u32 s6, s2, s8 +; GFX1250-NEXT: s_mul_hi_u32 s4, s2, s8 ; GFX1250-NEXT: s_cmp_lg_u32 s10, 0 ; GFX1250-NEXT: s_mul_hi_u32 s12, s3, s8 ; GFX1250-NEXT: s_add_co_ci_u32 s10, s9, s11 @@ -3170,33 +3170,33 @@ define amdgpu_kernel void @sudiv64(ptr addrspace(1) %out, i64 %x, i64 %y) { ; GFX1250-NEXT: s_mul_hi_u32 s9, s2, s10 ; GFX1250-NEXT: s_mul_i32 s8, s2, s10 ; GFX1250-NEXT: s_mul_hi_u32 s13, s3, s10 -; GFX1250-NEXT: s_add_nc_u64 s[8:9], s[6:7], s[8:9] +; GFX1250-NEXT: s_add_nc_u64 s[8:9], s[4:5], s[8:9] ; GFX1250-NEXT: s_mul_i32 s10, s3, s10 -; GFX1250-NEXT: s_add_co_u32 s6, s8, s11 -; GFX1250-NEXT: s_add_co_ci_u32 s6, s9, s12 +; GFX1250-NEXT: s_add_co_u32 s4, s8, s11 +; GFX1250-NEXT: s_add_co_ci_u32 s4, s9, s12 ; GFX1250-NEXT: s_add_co_ci_u32 s11, s13, 0 ; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) -; GFX1250-NEXT: s_add_nc_u64 s[8:9], s[6:7], s[10:11] +; GFX1250-NEXT: s_add_nc_u64 s[8:9], s[4:5], s[10:11] ; GFX1250-NEXT: s_and_b64 s[10:11], s[8:9], 0xffffffff00000000 ; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) ; GFX1250-NEXT: s_or_b32 s10, s10, s8 -; GFX1250-NEXT: s_mul_u64 s[8:9], s[4:5], s[10:11] +; GFX1250-NEXT: s_mul_u64 s[8:9], s[6:7], s[10:11] ; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; GFX1250-NEXT: s_sub_co_u32 s6, s2, s8 +; GFX1250-NEXT: s_sub_co_u32 s4, s2, s8 ; GFX1250-NEXT: s_cselect_b32 s8, -1, 0 ; GFX1250-NEXT: s_sub_co_i32 s12, s3, s9 ; GFX1250-NEXT: s_cmp_lg_u32 s8, 0 -; GFX1250-NEXT: s_sub_co_ci_u32 s12, s12, s5 -; GFX1250-NEXT: s_sub_co_u32 s13, s6, s4 +; GFX1250-NEXT: s_sub_co_ci_u32 s12, s12, s7 +; GFX1250-NEXT: s_sub_co_u32 s13, s4, s6 ; GFX1250-NEXT: s_cselect_b32 s14, -1, 0 ; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) ; GFX1250-NEXT: s_cmp_lg_u32 s14, 0 ; GFX1250-NEXT: s_sub_co_ci_u32 s12, s12, 0 -; GFX1250-NEXT: s_cmp_ge_u32 s12, s5 +; GFX1250-NEXT: s_cmp_ge_u32 s12, s7 ; GFX1250-NEXT: s_cselect_b32 s14, -1, 0 -; GFX1250-NEXT: s_cmp_ge_u32 s13, s4 +; GFX1250-NEXT: s_cmp_ge_u32 s13, s6 ; GFX1250-NEXT: s_cselect_b32 s15, -1, 0 -; GFX1250-NEXT: s_cmp_eq_u32 s12, s5 +; GFX1250-NEXT: s_cmp_eq_u32 s12, s7 ; GFX1250-NEXT: s_add_nc_u64 s[12:13], s[10:11], 1 ; GFX1250-NEXT: s_cselect_b32 s16, s15, s14 ; GFX1250-NEXT: s_add_nc_u64 s[14:15], s[10:11], 2 @@ -3206,20 +3206,20 @@ define amdgpu_kernel void @sudiv64(ptr addrspace(1) %out, i64 %x, i64 %y) { ; GFX1250-NEXT: s_cmp_lg_u32 s8, 0 ; GFX1250-NEXT: s_sub_co_ci_u32 s3, s3, s9 ; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; GFX1250-NEXT: s_cmp_ge_u32 s3, s5 +; GFX1250-NEXT: s_cmp_ge_u32 s3, s7 
; GFX1250-NEXT: s_cselect_b32 s8, -1, 0 -; GFX1250-NEXT: s_cmp_ge_u32 s6, s4 -; GFX1250-NEXT: s_cselect_b32 s6, -1, 0 -; GFX1250-NEXT: s_cmp_eq_u32 s3, s5 -; GFX1250-NEXT: s_cselect_b32 s3, s6, s8 +; GFX1250-NEXT: s_cmp_ge_u32 s4, s6 +; GFX1250-NEXT: s_cselect_b32 s4, -1, 0 +; GFX1250-NEXT: s_cmp_eq_u32 s3, s7 +; GFX1250-NEXT: s_cselect_b32 s3, s4, s8 ; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1250-NEXT: s_cmp_lg_u32 s3, 0 ; GFX1250-NEXT: s_cselect_b32 s9, s13, s11 ; GFX1250-NEXT: s_cselect_b32 s8, s12, s10 ; GFX1250-NEXT: s_cbranch_execnz .LBB16_3 ; GFX1250-NEXT: .LBB16_2: -; GFX1250-NEXT: v_cvt_f32_u32_e32 v0, s4 -; GFX1250-NEXT: s_sub_co_i32 s5, 0, s4 +; GFX1250-NEXT: v_cvt_f32_u32_e32 v0, s6 +; GFX1250-NEXT: s_sub_co_i32 s4, 0, s6 ; GFX1250-NEXT: s_mov_b32 s9, 0 ; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(TRANS32_DEP_1) ; GFX1250-NEXT: v_rcp_iflag_f32_e32 v0, v0 @@ -3228,23 +3228,23 @@ define amdgpu_kernel void @sudiv64(ptr addrspace(1) %out, i64 %x, i64 %y) { ; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-NEXT: v_cvt_u32_f32_e32 v0, v0 ; GFX1250-NEXT: v_readfirstlane_b32 s3, v0 -; GFX1250-NEXT: s_mul_i32 s5, s5, s3 +; GFX1250-NEXT: s_mul_i32 s4, s4, s3 ; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) -; GFX1250-NEXT: s_mul_hi_u32 s5, s3, s5 -; GFX1250-NEXT: s_add_co_i32 s3, s3, s5 +; GFX1250-NEXT: s_mul_hi_u32 s4, s3, s4 +; GFX1250-NEXT: s_add_co_i32 s3, s3, s4 ; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) ; GFX1250-NEXT: s_mul_hi_u32 s3, s2, s3 -; GFX1250-NEXT: s_mul_i32 s5, s3, s4 +; GFX1250-NEXT: s_mul_i32 s4, s3, s6 ; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; GFX1250-NEXT: s_sub_co_i32 s2, s2, s5 -; GFX1250-NEXT: s_add_co_i32 s5, s3, 1 -; GFX1250-NEXT: s_sub_co_i32 s6, s2, s4 -; GFX1250-NEXT: s_cmp_ge_u32 s2, s4 -; GFX1250-NEXT: s_cselect_b32 s3, s5, s3 -; GFX1250-NEXT: s_cselect_b32 s2, s6, s2 -; GFX1250-NEXT: s_add_co_i32 s5, s3, 1 -; GFX1250-NEXT: s_cmp_ge_u32 s2, s4 -; GFX1250-NEXT: s_cselect_b32 s8, s5, s3 +; GFX1250-NEXT: s_sub_co_i32 s2, s2, s4 +; GFX1250-NEXT: s_add_co_i32 s4, s3, 1 +; GFX1250-NEXT: s_sub_co_i32 s5, s2, s6 +; GFX1250-NEXT: s_cmp_ge_u32 s2, s6 +; GFX1250-NEXT: s_cselect_b32 s3, s4, s3 +; GFX1250-NEXT: s_cselect_b32 s2, s5, s2 +; GFX1250-NEXT: s_add_co_i32 s4, s3, 1 +; GFX1250-NEXT: s_cmp_ge_u32 s2, s6 +; GFX1250-NEXT: s_cselect_b32 s8, s4, s3 ; GFX1250-NEXT: .LBB16_3: ; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1250-NEXT: v_mov_b64_e32 v[0:1], s[8:9] diff --git a/llvm/test/CodeGen/AMDGPU/coalesce-copy-to-agpr-to-av-registers.mir b/llvm/test/CodeGen/AMDGPU/coalesce-copy-to-agpr-to-av-registers.mir index ce1ea4d..c475efb 100644 --- a/llvm/test/CodeGen/AMDGPU/coalesce-copy-to-agpr-to-av-registers.mir +++ b/llvm/test/CodeGen/AMDGPU/coalesce-copy-to-agpr-to-av-registers.mir @@ -504,7 +504,7 @@ body: | ; CHECK-NEXT: [[COPY1:%[0-9]+]].sub1:areg_64 = COPY [[COPY]].sub0 ; CHECK-NEXT: [[COPY:%[0-9]+]].sub1:vreg_64 = COPY [[COPY]].sub0 ; CHECK-NEXT: INLINEASM &"; use $0", 0 /* attdialect */, 4325385 /* reguse:AReg_64 */, [[COPY1]] - ; CHECK-NEXT: INLINEASM &"; use $0", 0 /* attdialect */, 3670025 /* reguse:VS_64_with_sub1 */, [[COPY]] + ; CHECK-NEXT: INLINEASM &"; use $0", 0 /* attdialect */, 3735561 /* reguse:VReg_64 */, [[COPY]] ; CHECK-NEXT: SI_RETURN %0:vgpr_32 = COPY $vgpr0 undef %1.sub0:areg_64 = COPY %0 @@ -512,7 +512,7 @@ body: | undef %2.sub0:vreg_64 = COPY %0 
%2.sub1:vreg_64 = COPY %0 INLINEASM &"; use $0", 0 /* attdialect */, 4325385 /* reguse:AReg_64 */, killed %1 - INLINEASM &"; use $0", 0 /* attdialect */, 3670025 /* reguse:VReg_64 */, killed %2 + INLINEASM &"; use $0", 0 /* attdialect */, 3735561 /* reguse:VReg_64 */, killed %2 SI_RETURN ... diff --git a/llvm/test/CodeGen/AMDGPU/ds_write2.ll b/llvm/test/CodeGen/AMDGPU/ds_write2.ll index be60a00..0cae0e5 100644 --- a/llvm/test/CodeGen/AMDGPU/ds_write2.ll +++ b/llvm/test/CodeGen/AMDGPU/ds_write2.ll @@ -705,12 +705,13 @@ define amdgpu_kernel void @write2_ptr_subreg_arg_two_val_f32(ptr addrspace(1) %C ; GFX1250-NEXT: s_wait_kmcnt 0x0 ; GFX1250-NEXT: s_clause 0x1 ; GFX1250-NEXT: global_load_b32 v1, v0, s[0:1] scale_offset -; GFX1250-NEXT: global_load_b32 v0, v0, s[2:3] scale_offset -; GFX1250-NEXT: v_dual_mov_b32 v2, s4 :: v_dual_mov_b32 v3, s5 +; GFX1250-NEXT: global_load_b32 v2, v0, s[2:3] scale_offset +; GFX1250-NEXT: s_wait_xcnt 0x0 +; GFX1250-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v3, s5 ; GFX1250-NEXT: s_wait_loadcnt 0x1 -; GFX1250-NEXT: ds_store_b32 v2, v1 offset:32 +; GFX1250-NEXT: ds_store_b32 v0, v1 offset:32 ; GFX1250-NEXT: s_wait_loadcnt 0x0 -; GFX1250-NEXT: ds_store_b32 v3, v0 offset:32 +; GFX1250-NEXT: ds_store_b32 v3, v2 offset:32 ; GFX1250-NEXT: s_endpgm %x.i = tail call i32 @llvm.amdgcn.workitem.id.x() #1 %in0.gep = getelementptr float, ptr addrspace(1) %in0, i32 %x.i @@ -1282,14 +1283,14 @@ define amdgpu_kernel void @simple_write2_v4f32_superreg_align4(ptr addrspace(3) ; ; GFX1250-LABEL: simple_write2_v4f32_superreg_align4: ; GFX1250: ; %bb.0: -; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x8 -; GFX1250-NEXT: s_wait_xcnt 0x0 -; GFX1250-NEXT: s_load_b32 s4, s[4:5], 0x0 +; GFX1250-NEXT: s_clause 0x1 +; GFX1250-NEXT: s_load_b64 s[6:7], s[4:5], 0x8 +; GFX1250-NEXT: s_load_b32 s8, s[4:5], 0x0 ; GFX1250-NEXT: v_and_b32_e32 v0, 0x3ff, v0 ; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: s_load_b128 s[0:3], s[0:1], 0x0 +; GFX1250-NEXT: s_load_b128 s[0:3], s[6:7], 0x0 ; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-NEXT: v_lshl_add_u32 v0, v0, 4, s4 +; GFX1250-NEXT: v_lshl_add_u32 v0, v0, 4, s8 ; GFX1250-NEXT: s_wait_kmcnt 0x0 ; GFX1250-NEXT: v_dual_mov_b32 v1, s2 :: v_dual_mov_b32 v2, s3 ; GFX1250-NEXT: v_dual_mov_b32 v3, s0 :: v_dual_mov_b32 v4, s1 diff --git a/llvm/test/CodeGen/AMDGPU/flat-saddr-atomics.ll b/llvm/test/CodeGen/AMDGPU/flat-saddr-atomics.ll index 1e7855c..eefc781 100644 --- a/llvm/test/CodeGen/AMDGPU/flat-saddr-atomics.ll +++ b/llvm/test/CodeGen/AMDGPU/flat-saddr-atomics.ll @@ -541,11 +541,10 @@ define amdgpu_ps <2 x float> @flat_xchg_saddr_i64_rtn(ptr inreg %sbase, i32 %vof ; GFX1250-SDAG-LABEL: flat_xchg_saddr_i64_rtn: ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi ; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[2:3], v[0:1] -; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5 +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, src_flat_scratch_base_hi, v5 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) ; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0 ; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1 @@ -570,9 +569,8 @@ define amdgpu_ps <2 x float> @flat_xchg_saddr_i64_rtn(ptr inreg %sbase, i32 %vof ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: 
s_cbranch_execz .LBB10_2 ; GFX1250-SDAG-NEXT: .LBB10_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5] -; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4 +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v4 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo ; GFX1250-SDAG-NEXT: s_clause 0x1 @@ -586,14 +584,13 @@ define amdgpu_ps <2 x float> @flat_xchg_saddr_i64_rtn(ptr inreg %sbase, i32 %vof ; ; GFX1250-GISEL-LABEL: flat_xchg_saddr_i64_rtn: ; GFX1250-GISEL: ; %bb.0: -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v8, src_flat_scratch_base_hi -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3] ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, v0, v3 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, 0, v1, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_xor_b32_e32 v0, v7, v8 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, src_flat_scratch_base_hi, v7 bitop3:0x14 ; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1 ; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo @@ -618,10 +615,9 @@ define amdgpu_ps <2 x float> @flat_xchg_saddr_i64_rtn(ptr inreg %sbase, i32 %vof ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB10_2 ; GFX1250-GISEL-NEXT: .LBB10_4: ; %atomicrmw.private -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7] -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v6, v0 +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v6 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: s_clause 0x1 ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off @@ -727,13 +723,12 @@ define amdgpu_ps <2 x float> @flat_xchg_saddr_i64_rtn_neg128(ptr inreg %sbase, i ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[0:1], v[0:1] -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) -; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5 -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, src_flat_scratch_base_hi, v5 ; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0 ; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1 ; GFX1250-SDAG-NEXT: s_and_saveexec_b32 s0, vcc_lo +; GFX1250-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB11_3 ; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow @@ -754,9 +749,8 @@ define amdgpu_ps <2 x float> 
@flat_xchg_saddr_i64_rtn_neg128(ptr inreg %sbase, i ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB11_2 ; GFX1250-SDAG-NEXT: .LBB11_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5] -; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4 +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v4 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo ; GFX1250-SDAG-NEXT: s_clause 0x1 @@ -770,8 +764,7 @@ define amdgpu_ps <2 x float> @flat_xchg_saddr_i64_rtn_neg128(ptr inreg %sbase, i ; ; GFX1250-GISEL-LABEL: flat_xchg_saddr_i64_rtn_neg128: ; GFX1250-GISEL: ; %bb.0: -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v8, src_flat_scratch_base_hi -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3] ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v0, vcc_lo, v0, v3 @@ -780,7 +773,7 @@ define amdgpu_ps <2 x float> @flat_xchg_saddr_i64_rtn_neg128(ptr inreg %sbase, i ; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, 0xffffff80, v0 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, -1, v1, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_xor_b32_e32 v0, v7, v8 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, src_flat_scratch_base_hi, v7 bitop3:0x14 ; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1 ; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo @@ -805,10 +798,9 @@ define amdgpu_ps <2 x float> @flat_xchg_saddr_i64_rtn_neg128(ptr inreg %sbase, i ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB11_2 ; GFX1250-GISEL-NEXT: .LBB11_4: ; %atomicrmw.private -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7] -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v6, v0 +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v6 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: s_clause 0x1 ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off @@ -917,11 +909,10 @@ define amdgpu_ps void @flat_xchg_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 ; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0 -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] -; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1 -; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, src_flat_scratch_base_hi, v1 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4 ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 @@ -943,9 +934,8 @@ define amdgpu_ps void @flat_xchg_saddr_i64_nortn(ptr inreg %sbase, i32 
%voffset, ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB12_2 ; GFX1250-SDAG-NEXT: .LBB12_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] -; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0 +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, src_flat_scratch_base_lo, v0 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v0, -1, v4, vcc_lo ; GFX1250-SDAG-NEXT: scratch_store_b64 v0, v[2:3], off @@ -953,15 +943,14 @@ define amdgpu_ps void @flat_xchg_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; ; GFX1250-GISEL-LABEL: flat_xchg_saddr_i64_nortn: ; GFX1250-GISEL: ; %bb.0: -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v6, src_flat_scratch_base_hi :: v_dual_mov_b32 v4, v1 -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3] ; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, v2, v0 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, v3, v6 +; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, src_flat_scratch_base_hi, v3 ; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1 ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB12_3 @@ -982,10 +971,9 @@ define amdgpu_ps void @flat_xchg_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB12_2 ; GFX1250-GISEL-NEXT: .LBB12_4: ; %atomicrmw.private -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3] -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v2, v0 +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v2 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v0, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_store_b64 v0, v[4:5], off ; GFX1250-GISEL-NEXT: s_endpgm @@ -1069,11 +1057,9 @@ define amdgpu_ps void @flat_xchg_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %v ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[0:1], v[0:1] -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) -; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1 ; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, src_flat_scratch_base_hi, v1 ; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4 ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB13_3 @@ -1094,9 +1080,8 @@ define amdgpu_ps void @flat_xchg_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %v ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB13_2 ; 
GFX1250-SDAG-NEXT: .LBB13_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] -; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0 +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, src_flat_scratch_base_lo, v0 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v0, -1, v4, vcc_lo ; GFX1250-SDAG-NEXT: scratch_store_b64 v0, v[2:3], off @@ -1104,8 +1089,7 @@ define amdgpu_ps void @flat_xchg_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %v ; ; GFX1250-GISEL-LABEL: flat_xchg_saddr_i64_nortn_neg128: ; GFX1250-GISEL: ; %bb.0: -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v6, src_flat_scratch_base_hi :: v_dual_mov_b32 v4, v1 -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3] ; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) @@ -1115,7 +1099,7 @@ define amdgpu_ps void @flat_xchg_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %v ; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, 0xffffff80, v1 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, -1, v3, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, v3, v6 +; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, src_flat_scratch_base_hi, v3 ; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1 ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB13_3 @@ -1136,10 +1120,9 @@ define amdgpu_ps void @flat_xchg_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %v ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB13_2 ; GFX1250-GISEL-NEXT: .LBB13_4: ; %atomicrmw.private -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3] -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v2, v0 +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v2 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v0, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_store_b64 v0, v[4:5], off ; GFX1250-GISEL-NEXT: s_endpgm @@ -1400,11 +1383,10 @@ define amdgpu_ps <2 x float> @flat_add_saddr_i64_rtn(ptr inreg %sbase, i32 %voff ; GFX1250-SDAG-LABEL: flat_add_saddr_i64_rtn: ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi ; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[2:3], v[0:1] -; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5 +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, src_flat_scratch_base_hi, v5 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) ; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0 ; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1 @@ -1429,9 +1411,8 @@ define amdgpu_ps <2 x float> @flat_add_saddr_i64_rtn(ptr inreg %sbase, i32 %voff ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB18_2 ; GFX1250-SDAG-NEXT: .LBB18_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_mov_b32 s1, 
src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5] -; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4 +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v4 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off @@ -1445,14 +1426,13 @@ define amdgpu_ps <2 x float> @flat_add_saddr_i64_rtn(ptr inreg %sbase, i32 %voff ; ; GFX1250-GISEL-LABEL: flat_add_saddr_i64_rtn: ; GFX1250-GISEL: ; %bb.0: -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v8, src_flat_scratch_base_hi -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3] ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, v0, v3 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, 0, v1, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_xor_b32_e32 v0, v7, v8 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, src_flat_scratch_base_hi, v7 bitop3:0x14 ; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1 ; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo @@ -1477,10 +1457,9 @@ define amdgpu_ps <2 x float> @flat_add_saddr_i64_rtn(ptr inreg %sbase, i32 %voff ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB18_2 ; GFX1250-GISEL-NEXT: .LBB18_4: ; %atomicrmw.private -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7] -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v6, v0 +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v6 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 @@ -1590,13 +1569,12 @@ define amdgpu_ps <2 x float> @flat_add_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[0:1], v[0:1] -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) -; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5 -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, src_flat_scratch_base_hi, v5 ; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0 ; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1 ; GFX1250-SDAG-NEXT: s_and_saveexec_b32 s0, vcc_lo +; GFX1250-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB19_3 ; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow @@ -1617,9 +1595,8 @@ define amdgpu_ps <2 x float> @flat_add_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; 
GFX1250-SDAG-NEXT: s_cbranch_execz .LBB19_2 ; GFX1250-SDAG-NEXT: .LBB19_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5] -; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4 +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v4 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off @@ -1633,8 +1610,7 @@ define amdgpu_ps <2 x float> @flat_add_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; ; GFX1250-GISEL-LABEL: flat_add_saddr_i64_rtn_neg128: ; GFX1250-GISEL: ; %bb.0: -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v8, src_flat_scratch_base_hi -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3] ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v0, vcc_lo, v0, v3 @@ -1643,7 +1619,7 @@ define amdgpu_ps <2 x float> @flat_add_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, 0xffffff80, v0 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, -1, v1, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_xor_b32_e32 v0, v7, v8 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, src_flat_scratch_base_hi, v7 bitop3:0x14 ; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1 ; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo @@ -1668,10 +1644,9 @@ define amdgpu_ps <2 x float> @flat_add_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB19_2 ; GFX1250-GISEL-NEXT: .LBB19_4: ; %atomicrmw.private -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7] -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v6, v0 +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v6 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 @@ -1784,11 +1759,10 @@ define amdgpu_ps void @flat_add_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 ; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0 -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] -; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1 -; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, src_flat_scratch_base_hi, v1 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4 ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 @@ -1810,9 +1784,8 @@ define amdgpu_ps void @flat_add_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; 
GFX1250-SDAG-NEXT: s_cbranch_execz .LBB20_2 ; GFX1250-SDAG-NEXT: .LBB20_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] -; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0 +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, src_flat_scratch_base_lo, v0 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off @@ -1823,15 +1796,14 @@ define amdgpu_ps void @flat_add_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; ; GFX1250-GISEL-LABEL: flat_add_saddr_i64_nortn: ; GFX1250-GISEL: ; %bb.0: -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v6, src_flat_scratch_base_hi :: v_dual_mov_b32 v4, v1 -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3] ; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, v2, v0 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, v3, v6 +; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, src_flat_scratch_base_hi, v3 ; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1 ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB20_3 @@ -1852,10 +1824,9 @@ define amdgpu_ps void @flat_add_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB20_2 ; GFX1250-GISEL-NEXT: .LBB20_4: ; %atomicrmw.private -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3] -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v2, v0 +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v2 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 @@ -1950,11 +1921,9 @@ define amdgpu_ps void @flat_add_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[0:1], v[0:1] -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) -; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1 ; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, src_flat_scratch_base_hi, v1 ; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4 ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB21_3 @@ -1975,9 +1944,8 @@ define amdgpu_ps void @flat_add_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB21_2 ; GFX1250-SDAG-NEXT: .LBB21_4: ; %atomicrmw.private -; 
GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] -; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0 +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, src_flat_scratch_base_lo, v0 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off @@ -1988,8 +1956,7 @@ define amdgpu_ps void @flat_add_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; ; GFX1250-GISEL-LABEL: flat_add_saddr_i64_nortn_neg128: ; GFX1250-GISEL: ; %bb.0: -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v6, src_flat_scratch_base_hi :: v_dual_mov_b32 v4, v1 -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3] ; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) @@ -1999,7 +1966,7 @@ define amdgpu_ps void @flat_add_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, 0xffffff80, v1 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, -1, v3, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, v3, v6 +; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, src_flat_scratch_base_hi, v3 ; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1 ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB21_3 @@ -2020,10 +1987,9 @@ define amdgpu_ps void @flat_add_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB21_2 ; GFX1250-GISEL-NEXT: .LBB21_4: ; %atomicrmw.private -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3] -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v2, v0 +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v2 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 @@ -2295,11 +2261,10 @@ define amdgpu_ps <2 x float> @flat_sub_saddr_i64_rtn(ptr inreg %sbase, i32 %voff ; GFX1250-SDAG-LABEL: flat_sub_saddr_i64_rtn: ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi ; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[2:3], v[0:1] -; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5 +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, src_flat_scratch_base_hi, v5 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) ; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0 ; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1 @@ -2324,9 +2289,8 @@ define amdgpu_ps <2 x float> @flat_sub_saddr_i64_rtn(ptr inreg %sbase, i32 %voff ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB26_2 ; GFX1250-SDAG-NEXT: .LBB26_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: 
v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5] -; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4 +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v4 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off @@ -2340,14 +2304,13 @@ define amdgpu_ps <2 x float> @flat_sub_saddr_i64_rtn(ptr inreg %sbase, i32 %voff ; ; GFX1250-GISEL-LABEL: flat_sub_saddr_i64_rtn: ; GFX1250-GISEL: ; %bb.0: -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v8, src_flat_scratch_base_hi -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3] ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, v0, v3 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, 0, v1, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_xor_b32_e32 v0, v7, v8 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, src_flat_scratch_base_hi, v7 bitop3:0x14 ; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1 ; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo @@ -2372,10 +2335,9 @@ define amdgpu_ps <2 x float> @flat_sub_saddr_i64_rtn(ptr inreg %sbase, i32 %voff ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB26_2 ; GFX1250-GISEL-NEXT: .LBB26_4: ; %atomicrmw.private -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7] -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v6, v0 +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v6 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 @@ -2487,13 +2449,12 @@ define amdgpu_ps <2 x float> @flat_sub_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[0:1], v[0:1] -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) -; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5 -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, src_flat_scratch_base_hi, v5 ; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0 ; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1 ; GFX1250-SDAG-NEXT: s_and_saveexec_b32 s0, vcc_lo +; GFX1250-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB27_3 ; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow @@ -2514,9 +2475,8 @@ define amdgpu_ps <2 x float> @flat_sub_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB27_2 ; 
GFX1250-SDAG-NEXT: .LBB27_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5] -; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4 +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v4 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off @@ -2530,8 +2490,7 @@ define amdgpu_ps <2 x float> @flat_sub_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; ; GFX1250-GISEL-LABEL: flat_sub_saddr_i64_rtn_neg128: ; GFX1250-GISEL: ; %bb.0: -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v8, src_flat_scratch_base_hi -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3] ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v0, vcc_lo, v0, v3 @@ -2540,7 +2499,7 @@ define amdgpu_ps <2 x float> @flat_sub_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, 0xffffff80, v0 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, -1, v1, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_xor_b32_e32 v0, v7, v8 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, src_flat_scratch_base_hi, v7 bitop3:0x14 ; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1 ; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo @@ -2565,10 +2524,9 @@ define amdgpu_ps <2 x float> @flat_sub_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB27_2 ; GFX1250-GISEL-NEXT: .LBB27_4: ; %atomicrmw.private -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7] -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v6, v0 +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v6 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 @@ -2683,11 +2641,10 @@ define amdgpu_ps void @flat_sub_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 ; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0 -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] -; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1 -; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, src_flat_scratch_base_hi, v1 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4 ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 @@ -2709,9 +2666,8 @@ define amdgpu_ps void @flat_sub_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB28_2 ; 
GFX1250-SDAG-NEXT: .LBB28_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] -; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0 +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, src_flat_scratch_base_lo, v0 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off @@ -2722,15 +2678,14 @@ define amdgpu_ps void @flat_sub_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; ; GFX1250-GISEL-LABEL: flat_sub_saddr_i64_nortn: ; GFX1250-GISEL: ; %bb.0: -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v6, src_flat_scratch_base_hi :: v_dual_mov_b32 v4, v1 -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3] ; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, v2, v0 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, v3, v6 +; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, src_flat_scratch_base_hi, v3 ; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1 ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB28_3 @@ -2751,10 +2706,9 @@ define amdgpu_ps void @flat_sub_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB28_2 ; GFX1250-GISEL-NEXT: .LBB28_4: ; %atomicrmw.private -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3] -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v2, v0 +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v2 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 @@ -2851,11 +2805,9 @@ define amdgpu_ps void @flat_sub_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[0:1], v[0:1] -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) -; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1 ; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, src_flat_scratch_base_hi, v1 ; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4 ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB29_3 @@ -2876,9 +2828,8 @@ define amdgpu_ps void @flat_sub_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB29_2 ; GFX1250-SDAG-NEXT: .LBB29_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; 
GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] -; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0 +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, src_flat_scratch_base_lo, v0 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off @@ -2889,8 +2840,7 @@ define amdgpu_ps void @flat_sub_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; ; GFX1250-GISEL-LABEL: flat_sub_saddr_i64_nortn_neg128: ; GFX1250-GISEL: ; %bb.0: -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v6, src_flat_scratch_base_hi :: v_dual_mov_b32 v4, v1 -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3] ; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) @@ -2900,7 +2850,7 @@ define amdgpu_ps void @flat_sub_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, 0xffffff80, v1 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, -1, v3, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, v3, v6 +; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, src_flat_scratch_base_hi, v3 ; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1 ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB29_3 @@ -2921,10 +2871,9 @@ define amdgpu_ps void @flat_sub_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB29_2 ; GFX1250-GISEL-NEXT: .LBB29_4: ; %atomicrmw.private -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3] -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v2, v0 +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v2 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 @@ -3198,11 +3147,10 @@ define amdgpu_ps <2 x float> @flat_and_saddr_i64_rtn(ptr inreg %sbase, i32 %voff ; GFX1250-SDAG-LABEL: flat_and_saddr_i64_rtn: ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi ; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[2:3], v[0:1] -; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5 +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, src_flat_scratch_base_hi, v5 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) ; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0 ; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1 @@ -3227,9 +3175,8 @@ define amdgpu_ps <2 x float> @flat_and_saddr_i64_rtn(ptr inreg %sbase, i32 %voff ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB34_2 ; GFX1250-SDAG-NEXT: .LBB34_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5] -; GFX1250-SDAG-NEXT: 
v_subrev_nc_u32_e32 v0, s1, v4 +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v4 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off @@ -3244,14 +3191,13 @@ define amdgpu_ps <2 x float> @flat_and_saddr_i64_rtn(ptr inreg %sbase, i32 %voff ; ; GFX1250-GISEL-LABEL: flat_and_saddr_i64_rtn: ; GFX1250-GISEL: ; %bb.0: -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v8, src_flat_scratch_base_hi -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3] ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, v0, v3 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, 0, v1, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_xor_b32_e32 v0, v7, v8 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, src_flat_scratch_base_hi, v7 bitop3:0x14 ; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1 ; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo @@ -3276,10 +3222,9 @@ define amdgpu_ps <2 x float> @flat_and_saddr_i64_rtn(ptr inreg %sbase, i32 %voff ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB34_2 ; GFX1250-GISEL-NEXT: .LBB34_4: ; %atomicrmw.private -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7] -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v6, v0 +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v6 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 @@ -3390,13 +3335,12 @@ define amdgpu_ps <2 x float> @flat_and_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[0:1], v[0:1] -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) -; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5 -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, src_flat_scratch_base_hi, v5 ; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0 ; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1 ; GFX1250-SDAG-NEXT: s_and_saveexec_b32 s0, vcc_lo +; GFX1250-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB35_3 ; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow @@ -3417,9 +3361,8 @@ define amdgpu_ps <2 x float> @flat_and_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB35_2 ; GFX1250-SDAG-NEXT: .LBB35_4: ; %atomicrmw.private -; 
GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5] -; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4 +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v4 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off @@ -3434,8 +3377,7 @@ define amdgpu_ps <2 x float> @flat_and_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; ; GFX1250-GISEL-LABEL: flat_and_saddr_i64_rtn_neg128: ; GFX1250-GISEL: ; %bb.0: -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v8, src_flat_scratch_base_hi -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3] ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v0, vcc_lo, v0, v3 @@ -3444,7 +3386,7 @@ define amdgpu_ps <2 x float> @flat_and_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, 0xffffff80, v0 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, -1, v1, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_xor_b32_e32 v0, v7, v8 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, src_flat_scratch_base_hi, v7 bitop3:0x14 ; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1 ; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo @@ -3469,10 +3411,9 @@ define amdgpu_ps <2 x float> @flat_and_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB35_2 ; GFX1250-GISEL-NEXT: .LBB35_4: ; %atomicrmw.private -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7] -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v6, v0 +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v6 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 @@ -3586,11 +3527,10 @@ define amdgpu_ps void @flat_and_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 ; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0 -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] -; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1 -; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, src_flat_scratch_base_hi, v1 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4 ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 @@ -3612,9 +3552,8 @@ define amdgpu_ps void @flat_and_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB36_2 ; GFX1250-SDAG-NEXT: .LBB36_4: ; %atomicrmw.private -; 
GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] -; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0 +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, src_flat_scratch_base_lo, v0 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off @@ -3626,15 +3565,14 @@ define amdgpu_ps void @flat_and_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; ; GFX1250-GISEL-LABEL: flat_and_saddr_i64_nortn: ; GFX1250-GISEL: ; %bb.0: -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v6, src_flat_scratch_base_hi :: v_dual_mov_b32 v4, v1 -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3] ; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, v2, v0 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, v3, v6 +; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, src_flat_scratch_base_hi, v3 ; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1 ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB36_3 @@ -3655,10 +3593,9 @@ define amdgpu_ps void @flat_and_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB36_2 ; GFX1250-GISEL-NEXT: .LBB36_4: ; %atomicrmw.private -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3] -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v2, v0 +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v2 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 @@ -3754,11 +3691,9 @@ define amdgpu_ps void @flat_and_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[0:1], v[0:1] -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) -; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1 ; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, src_flat_scratch_base_hi, v1 ; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4 ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB37_3 @@ -3779,9 +3714,8 @@ define amdgpu_ps void @flat_and_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB37_2 ; GFX1250-SDAG-NEXT: .LBB37_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] 
-; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0 +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, src_flat_scratch_base_lo, v0 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off @@ -3793,8 +3727,7 @@ define amdgpu_ps void @flat_and_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; ; GFX1250-GISEL-LABEL: flat_and_saddr_i64_nortn_neg128: ; GFX1250-GISEL: ; %bb.0: -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v6, src_flat_scratch_base_hi :: v_dual_mov_b32 v4, v1 -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3] ; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) @@ -3804,7 +3737,7 @@ define amdgpu_ps void @flat_and_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, 0xffffff80, v1 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, -1, v3, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, v3, v6 +; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, src_flat_scratch_base_hi, v3 ; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1 ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB37_3 @@ -3825,10 +3758,9 @@ define amdgpu_ps void @flat_and_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB37_2 ; GFX1250-GISEL-NEXT: .LBB37_4: ; %atomicrmw.private -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3] -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v2, v0 +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v2 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 @@ -4101,11 +4033,10 @@ define amdgpu_ps <2 x float> @flat_or_saddr_i64_rtn(ptr inreg %sbase, i32 %voffs ; GFX1250-SDAG-LABEL: flat_or_saddr_i64_rtn: ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi ; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[2:3], v[0:1] -; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5 +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, src_flat_scratch_base_hi, v5 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) ; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0 ; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1 @@ -4130,9 +4061,8 @@ define amdgpu_ps <2 x float> @flat_or_saddr_i64_rtn(ptr inreg %sbase, i32 %voffs ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB42_2 ; GFX1250-SDAG-NEXT: .LBB42_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5] -; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4 +; GFX1250-SDAG-NEXT: 
v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v4 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off @@ -4147,14 +4077,13 @@ define amdgpu_ps <2 x float> @flat_or_saddr_i64_rtn(ptr inreg %sbase, i32 %voffs ; ; GFX1250-GISEL-LABEL: flat_or_saddr_i64_rtn: ; GFX1250-GISEL: ; %bb.0: -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v8, src_flat_scratch_base_hi -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3] ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, v0, v3 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, 0, v1, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_xor_b32_e32 v0, v7, v8 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, src_flat_scratch_base_hi, v7 bitop3:0x14 ; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1 ; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo @@ -4179,10 +4108,9 @@ define amdgpu_ps <2 x float> @flat_or_saddr_i64_rtn(ptr inreg %sbase, i32 %voffs ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB42_2 ; GFX1250-GISEL-NEXT: .LBB42_4: ; %atomicrmw.private -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7] -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v6, v0 +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v6 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 @@ -4293,13 +4221,12 @@ define amdgpu_ps <2 x float> @flat_or_saddr_i64_rtn_neg128(ptr inreg %sbase, i32 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[0:1], v[0:1] -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) -; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5 -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, src_flat_scratch_base_hi, v5 ; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0 ; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1 ; GFX1250-SDAG-NEXT: s_and_saveexec_b32 s0, vcc_lo +; GFX1250-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB43_3 ; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow @@ -4320,9 +4247,8 @@ define amdgpu_ps <2 x float> @flat_or_saddr_i64_rtn_neg128(ptr inreg %sbase, i32 ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB43_2 ; GFX1250-SDAG-NEXT: .LBB43_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; 
GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5] -; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4 +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v4 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off @@ -4337,8 +4263,7 @@ define amdgpu_ps <2 x float> @flat_or_saddr_i64_rtn_neg128(ptr inreg %sbase, i32 ; ; GFX1250-GISEL-LABEL: flat_or_saddr_i64_rtn_neg128: ; GFX1250-GISEL: ; %bb.0: -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v8, src_flat_scratch_base_hi -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3] ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v0, vcc_lo, v0, v3 @@ -4347,7 +4272,7 @@ define amdgpu_ps <2 x float> @flat_or_saddr_i64_rtn_neg128(ptr inreg %sbase, i32 ; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, 0xffffff80, v0 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, -1, v1, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_xor_b32_e32 v0, v7, v8 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, src_flat_scratch_base_hi, v7 bitop3:0x14 ; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1 ; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo @@ -4372,10 +4297,9 @@ define amdgpu_ps <2 x float> @flat_or_saddr_i64_rtn_neg128(ptr inreg %sbase, i32 ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB43_2 ; GFX1250-GISEL-NEXT: .LBB43_4: ; %atomicrmw.private -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7] -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v6, v0 +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v6 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 @@ -4489,11 +4413,10 @@ define amdgpu_ps void @flat_or_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, i ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 ; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0 -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] -; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1 -; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, src_flat_scratch_base_hi, v1 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4 ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 @@ -4515,9 +4438,8 @@ define amdgpu_ps void @flat_or_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, i ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB44_2 ; GFX1250-SDAG-NEXT: .LBB44_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; 
GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] -; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0 +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, src_flat_scratch_base_lo, v0 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off @@ -4529,15 +4451,14 @@ define amdgpu_ps void @flat_or_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, i ; ; GFX1250-GISEL-LABEL: flat_or_saddr_i64_nortn: ; GFX1250-GISEL: ; %bb.0: -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v6, src_flat_scratch_base_hi :: v_dual_mov_b32 v4, v1 -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3] ; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, v2, v0 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, v3, v6 +; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, src_flat_scratch_base_hi, v3 ; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1 ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB44_3 @@ -4558,10 +4479,9 @@ define amdgpu_ps void @flat_or_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, i ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB44_2 ; GFX1250-GISEL-NEXT: .LBB44_4: ; %atomicrmw.private -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3] -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v2, v0 +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v2 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 @@ -4657,11 +4577,9 @@ define amdgpu_ps void @flat_or_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vof ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[0:1], v[0:1] -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) -; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1 ; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, src_flat_scratch_base_hi, v1 ; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4 ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB45_3 @@ -4682,9 +4600,8 @@ define amdgpu_ps void @flat_or_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vof ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB45_2 ; GFX1250-SDAG-NEXT: .LBB45_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] -; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0 +; 
GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, src_flat_scratch_base_lo, v0 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off @@ -4696,8 +4613,7 @@ define amdgpu_ps void @flat_or_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vof ; ; GFX1250-GISEL-LABEL: flat_or_saddr_i64_nortn_neg128: ; GFX1250-GISEL: ; %bb.0: -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v6, src_flat_scratch_base_hi :: v_dual_mov_b32 v4, v1 -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3] ; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) @@ -4707,7 +4623,7 @@ define amdgpu_ps void @flat_or_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vof ; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, 0xffffff80, v1 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, -1, v3, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, v3, v6 +; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, src_flat_scratch_base_hi, v3 ; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1 ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB45_3 @@ -4728,10 +4644,9 @@ define amdgpu_ps void @flat_or_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vof ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB45_2 ; GFX1250-GISEL-NEXT: .LBB45_4: ; %atomicrmw.private -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3] -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v2, v0 +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v2 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 @@ -5004,11 +4919,10 @@ define amdgpu_ps <2 x float> @flat_xor_saddr_i64_rtn(ptr inreg %sbase, i32 %voff ; GFX1250-SDAG-LABEL: flat_xor_saddr_i64_rtn: ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi ; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[2:3], v[0:1] -; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5 +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, src_flat_scratch_base_hi, v5 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) ; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0 ; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1 @@ -5033,9 +4947,8 @@ define amdgpu_ps <2 x float> @flat_xor_saddr_i64_rtn(ptr inreg %sbase, i32 %voff ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB50_2 ; GFX1250-SDAG-NEXT: .LBB50_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5] -; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4 +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v4 ; 
GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off @@ -5050,14 +4963,13 @@ define amdgpu_ps <2 x float> @flat_xor_saddr_i64_rtn(ptr inreg %sbase, i32 %voff ; ; GFX1250-GISEL-LABEL: flat_xor_saddr_i64_rtn: ; GFX1250-GISEL: ; %bb.0: -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v8, src_flat_scratch_base_hi -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3] ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, v0, v3 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, 0, v1, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_xor_b32_e32 v0, v7, v8 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, src_flat_scratch_base_hi, v7 bitop3:0x14 ; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1 ; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo @@ -5082,10 +4994,9 @@ define amdgpu_ps <2 x float> @flat_xor_saddr_i64_rtn(ptr inreg %sbase, i32 %voff ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB50_2 ; GFX1250-GISEL-NEXT: .LBB50_4: ; %atomicrmw.private -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7] -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v6, v0 +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v6 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 @@ -5196,13 +5107,12 @@ define amdgpu_ps <2 x float> @flat_xor_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[0:1], v[0:1] -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) -; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5 -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, src_flat_scratch_base_hi, v5 ; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0 ; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1 ; GFX1250-SDAG-NEXT: s_and_saveexec_b32 s0, vcc_lo +; GFX1250-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB51_3 ; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow @@ -5223,9 +5133,8 @@ define amdgpu_ps <2 x float> @flat_xor_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB51_2 ; GFX1250-SDAG-NEXT: .LBB51_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5] -; 
GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4 +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v4 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off @@ -5240,8 +5149,7 @@ define amdgpu_ps <2 x float> @flat_xor_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; ; GFX1250-GISEL-LABEL: flat_xor_saddr_i64_rtn_neg128: ; GFX1250-GISEL: ; %bb.0: -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v8, src_flat_scratch_base_hi -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3] ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v0, vcc_lo, v0, v3 @@ -5250,7 +5158,7 @@ define amdgpu_ps <2 x float> @flat_xor_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, 0xffffff80, v0 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, -1, v1, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_xor_b32_e32 v0, v7, v8 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, src_flat_scratch_base_hi, v7 bitop3:0x14 ; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1 ; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo @@ -5275,10 +5183,9 @@ define amdgpu_ps <2 x float> @flat_xor_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB51_2 ; GFX1250-GISEL-NEXT: .LBB51_4: ; %atomicrmw.private -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7] -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v6, v0 +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v6 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 @@ -5392,11 +5299,10 @@ define amdgpu_ps void @flat_xor_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 ; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0 -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] -; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1 -; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, src_flat_scratch_base_hi, v1 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4 ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 @@ -5418,9 +5324,8 @@ define amdgpu_ps void @flat_xor_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB52_2 ; GFX1250-SDAG-NEXT: .LBB52_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] -; 
GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0 +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, src_flat_scratch_base_lo, v0 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off @@ -5432,15 +5337,14 @@ define amdgpu_ps void @flat_xor_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; ; GFX1250-GISEL-LABEL: flat_xor_saddr_i64_nortn: ; GFX1250-GISEL: ; %bb.0: -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v6, src_flat_scratch_base_hi :: v_dual_mov_b32 v4, v1 -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3] ; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, v2, v0 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, v3, v6 +; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, src_flat_scratch_base_hi, v3 ; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1 ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB52_3 @@ -5461,10 +5365,9 @@ define amdgpu_ps void @flat_xor_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB52_2 ; GFX1250-GISEL-NEXT: .LBB52_4: ; %atomicrmw.private -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3] -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v2, v0 +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v2 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 @@ -5560,11 +5463,9 @@ define amdgpu_ps void @flat_xor_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[0:1], v[0:1] -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) -; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1 ; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, src_flat_scratch_base_hi, v1 ; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4 ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB53_3 @@ -5585,9 +5486,8 @@ define amdgpu_ps void @flat_xor_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB53_2 ; GFX1250-SDAG-NEXT: .LBB53_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] -; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0 +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, 
src_flat_scratch_base_lo, v0 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off @@ -5599,8 +5499,7 @@ define amdgpu_ps void @flat_xor_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; ; GFX1250-GISEL-LABEL: flat_xor_saddr_i64_nortn_neg128: ; GFX1250-GISEL: ; %bb.0: -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v6, src_flat_scratch_base_hi :: v_dual_mov_b32 v4, v1 -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3] ; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) @@ -5610,7 +5509,7 @@ define amdgpu_ps void @flat_xor_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, 0xffffff80, v1 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, -1, v3, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, v3, v6 +; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, src_flat_scratch_base_hi, v3 ; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1 ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB53_3 @@ -5631,10 +5530,9 @@ define amdgpu_ps void @flat_xor_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB53_2 ; GFX1250-GISEL-NEXT: .LBB53_4: ; %atomicrmw.private -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3] -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v2, v0 +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v2 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 @@ -5877,11 +5775,10 @@ define amdgpu_ps <2 x float> @flat_max_saddr_i64_rtn(ptr inreg %sbase, i32 %voff ; GFX1250-SDAG-LABEL: flat_max_saddr_i64_rtn: ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi ; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[2:3], v[0:1] -; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5 +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, src_flat_scratch_base_hi, v5 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) ; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0 ; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1 @@ -5902,9 +5799,8 @@ define amdgpu_ps <2 x float> @flat_max_saddr_i64_rtn(ptr inreg %sbase, i32 %voff ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB58_2 ; GFX1250-SDAG-NEXT: .LBB58_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5] -; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4 +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v4 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) ; 
GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off @@ -5918,14 +5814,13 @@ define amdgpu_ps <2 x float> @flat_max_saddr_i64_rtn(ptr inreg %sbase, i32 %voff ; ; GFX1250-GISEL-LABEL: flat_max_saddr_i64_rtn: ; GFX1250-GISEL: ; %bb.0: -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v8, src_flat_scratch_base_hi -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3] ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, v0, v3 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, 0, v1, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_xor_b32_e32 v0, v7, v8 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, src_flat_scratch_base_hi, v7 bitop3:0x14 ; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1 ; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo @@ -5946,10 +5841,9 @@ define amdgpu_ps <2 x float> @flat_max_saddr_i64_rtn(ptr inreg %sbase, i32 %voff ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB58_2 ; GFX1250-GISEL-NEXT: .LBB58_4: ; %atomicrmw.private -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7] -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v6, v0 +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v6 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 @@ -6061,13 +5955,12 @@ define amdgpu_ps <2 x float> @flat_max_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[0:1], v[0:1] -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) -; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5 -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, src_flat_scratch_base_hi, v5 ; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0 ; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1 ; GFX1250-SDAG-NEXT: s_and_saveexec_b32 s0, vcc_lo +; GFX1250-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB59_3 ; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow @@ -6084,9 +5977,8 @@ define amdgpu_ps <2 x float> @flat_max_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB59_2 ; GFX1250-SDAG-NEXT: .LBB59_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5] -; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4 +; 
GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v4 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off @@ -6100,8 +5992,7 @@ define amdgpu_ps <2 x float> @flat_max_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; ; GFX1250-GISEL-LABEL: flat_max_saddr_i64_rtn_neg128: ; GFX1250-GISEL: ; %bb.0: -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v8, src_flat_scratch_base_hi -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3] ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v0, vcc_lo, v0, v3 @@ -6110,7 +6001,7 @@ define amdgpu_ps <2 x float> @flat_max_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, 0xffffff80, v0 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, -1, v1, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_xor_b32_e32 v0, v7, v8 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, src_flat_scratch_base_hi, v7 bitop3:0x14 ; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1 ; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo @@ -6131,10 +6022,9 @@ define amdgpu_ps <2 x float> @flat_max_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB59_2 ; GFX1250-GISEL-NEXT: .LBB59_4: ; %atomicrmw.private -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7] -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v6, v0 +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v6 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 @@ -6249,11 +6139,10 @@ define amdgpu_ps void @flat_max_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 ; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0 -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] -; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1 -; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, src_flat_scratch_base_hi, v1 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4 ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 @@ -6272,9 +6161,8 @@ define amdgpu_ps void @flat_max_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB60_2 ; GFX1250-SDAG-NEXT: .LBB60_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] -; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0 +; 
GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, src_flat_scratch_base_lo, v0 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off @@ -6285,15 +6173,14 @@ define amdgpu_ps void @flat_max_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; ; GFX1250-GISEL-LABEL: flat_max_saddr_i64_nortn: ; GFX1250-GISEL: ; %bb.0: -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v6, src_flat_scratch_base_hi :: v_dual_mov_b32 v4, v1 -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3] ; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, v2, v0 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, v3, v6 +; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, src_flat_scratch_base_hi, v3 ; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1 ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB60_3 @@ -6311,10 +6198,9 @@ define amdgpu_ps void @flat_max_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB60_2 ; GFX1250-GISEL-NEXT: .LBB60_4: ; %atomicrmw.private -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3] -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v2, v0 +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v2 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 @@ -6409,11 +6295,9 @@ define amdgpu_ps void @flat_max_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[0:1], v[0:1] -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) -; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1 ; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, src_flat_scratch_base_hi, v1 ; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4 ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB61_3 @@ -6431,9 +6315,8 @@ define amdgpu_ps void @flat_max_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB61_2 ; GFX1250-SDAG-NEXT: .LBB61_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] -; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0 +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, src_flat_scratch_base_lo, v0 ; GFX1250-SDAG-NEXT: s_delay_alu 
instid0(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off @@ -6444,8 +6327,7 @@ define amdgpu_ps void @flat_max_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; ; GFX1250-GISEL-LABEL: flat_max_saddr_i64_nortn_neg128: ; GFX1250-GISEL: ; %bb.0: -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v6, src_flat_scratch_base_hi :: v_dual_mov_b32 v4, v1 -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3] ; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) @@ -6455,7 +6337,7 @@ define amdgpu_ps void @flat_max_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, 0xffffff80, v1 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, -1, v3, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, v3, v6 +; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, src_flat_scratch_base_hi, v3 ; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1 ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB61_3 @@ -6473,10 +6355,9 @@ define amdgpu_ps void @flat_max_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB61_2 ; GFX1250-GISEL-NEXT: .LBB61_4: ; %atomicrmw.private -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3] -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v2, v0 +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v2 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 @@ -6718,11 +6599,10 @@ define amdgpu_ps <2 x float> @flat_min_saddr_i64_rtn(ptr inreg %sbase, i32 %voff ; GFX1250-SDAG-LABEL: flat_min_saddr_i64_rtn: ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi ; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[2:3], v[0:1] -; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5 +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, src_flat_scratch_base_hi, v5 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) ; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0 ; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1 @@ -6743,9 +6623,8 @@ define amdgpu_ps <2 x float> @flat_min_saddr_i64_rtn(ptr inreg %sbase, i32 %voff ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB66_2 ; GFX1250-SDAG-NEXT: .LBB66_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5] -; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4 +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v4 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo ; 
GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off @@ -6759,14 +6638,13 @@ define amdgpu_ps <2 x float> @flat_min_saddr_i64_rtn(ptr inreg %sbase, i32 %voff ; ; GFX1250-GISEL-LABEL: flat_min_saddr_i64_rtn: ; GFX1250-GISEL: ; %bb.0: -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v8, src_flat_scratch_base_hi -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3] ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, v0, v3 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, 0, v1, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_xor_b32_e32 v0, v7, v8 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, src_flat_scratch_base_hi, v7 bitop3:0x14 ; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1 ; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo @@ -6787,10 +6665,9 @@ define amdgpu_ps <2 x float> @flat_min_saddr_i64_rtn(ptr inreg %sbase, i32 %voff ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB66_2 ; GFX1250-GISEL-NEXT: .LBB66_4: ; %atomicrmw.private -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7] -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v6, v0 +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v6 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 @@ -6902,13 +6779,12 @@ define amdgpu_ps <2 x float> @flat_min_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[0:1], v[0:1] -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) -; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5 -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, src_flat_scratch_base_hi, v5 ; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0 ; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1 ; GFX1250-SDAG-NEXT: s_and_saveexec_b32 s0, vcc_lo +; GFX1250-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB67_3 ; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow @@ -6925,9 +6801,8 @@ define amdgpu_ps <2 x float> @flat_min_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB67_2 ; GFX1250-SDAG-NEXT: .LBB67_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5] -; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4 +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, 
src_flat_scratch_base_lo, v4 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off @@ -6941,8 +6816,7 @@ define amdgpu_ps <2 x float> @flat_min_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; ; GFX1250-GISEL-LABEL: flat_min_saddr_i64_rtn_neg128: ; GFX1250-GISEL: ; %bb.0: -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v8, src_flat_scratch_base_hi -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3] ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v0, vcc_lo, v0, v3 @@ -6951,7 +6825,7 @@ define amdgpu_ps <2 x float> @flat_min_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, 0xffffff80, v0 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, -1, v1, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_xor_b32_e32 v0, v7, v8 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, src_flat_scratch_base_hi, v7 bitop3:0x14 ; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1 ; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo @@ -6972,10 +6846,9 @@ define amdgpu_ps <2 x float> @flat_min_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB67_2 ; GFX1250-GISEL-NEXT: .LBB67_4: ; %atomicrmw.private -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7] -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v6, v0 +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v6 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 @@ -7090,11 +6963,10 @@ define amdgpu_ps void @flat_min_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 ; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0 -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] -; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1 -; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, src_flat_scratch_base_hi, v1 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4 ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 @@ -7113,9 +6985,8 @@ define amdgpu_ps void @flat_min_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB68_2 ; GFX1250-SDAG-NEXT: .LBB68_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] -; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0 +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, 
src_flat_scratch_base_lo, v0 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off @@ -7126,15 +6997,14 @@ define amdgpu_ps void @flat_min_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; ; GFX1250-GISEL-LABEL: flat_min_saddr_i64_nortn: ; GFX1250-GISEL: ; %bb.0: -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v6, src_flat_scratch_base_hi :: v_dual_mov_b32 v4, v1 -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3] ; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, v2, v0 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, v3, v6 +; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, src_flat_scratch_base_hi, v3 ; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1 ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB68_3 @@ -7152,10 +7022,9 @@ define amdgpu_ps void @flat_min_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB68_2 ; GFX1250-GISEL-NEXT: .LBB68_4: ; %atomicrmw.private -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3] -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v2, v0 +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v2 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 @@ -7250,11 +7119,9 @@ define amdgpu_ps void @flat_min_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[0:1], v[0:1] -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) -; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1 ; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, src_flat_scratch_base_hi, v1 ; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4 ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB69_3 @@ -7272,9 +7139,8 @@ define amdgpu_ps void @flat_min_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB69_2 ; GFX1250-SDAG-NEXT: .LBB69_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] -; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0 +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, src_flat_scratch_base_lo, v0 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-SDAG-NEXT: 
v_cndmask_b32_e32 v4, -1, v4, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off @@ -7285,8 +7151,7 @@ define amdgpu_ps void @flat_min_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; ; GFX1250-GISEL-LABEL: flat_min_saddr_i64_nortn_neg128: ; GFX1250-GISEL: ; %bb.0: -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v6, src_flat_scratch_base_hi :: v_dual_mov_b32 v4, v1 -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3] ; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) @@ -7296,7 +7161,7 @@ define amdgpu_ps void @flat_min_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, 0xffffff80, v1 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, -1, v3, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, v3, v6 +; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, src_flat_scratch_base_hi, v3 ; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1 ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB69_3 @@ -7314,10 +7179,9 @@ define amdgpu_ps void @flat_min_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB69_2 ; GFX1250-GISEL-NEXT: .LBB69_4: ; %atomicrmw.private -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3] -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v2, v0 +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v2 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 @@ -7559,11 +7423,10 @@ define amdgpu_ps <2 x float> @flat_umax_saddr_i64_rtn(ptr inreg %sbase, i32 %vof ; GFX1250-SDAG-LABEL: flat_umax_saddr_i64_rtn: ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi ; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[2:3], v[0:1] -; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5 +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, src_flat_scratch_base_hi, v5 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) ; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0 ; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1 @@ -7584,9 +7447,8 @@ define amdgpu_ps <2 x float> @flat_umax_saddr_i64_rtn(ptr inreg %sbase, i32 %vof ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB74_2 ; GFX1250-SDAG-NEXT: .LBB74_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5] -; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4 +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v4 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], 
v4, off @@ -7600,14 +7462,13 @@ define amdgpu_ps <2 x float> @flat_umax_saddr_i64_rtn(ptr inreg %sbase, i32 %vof ; ; GFX1250-GISEL-LABEL: flat_umax_saddr_i64_rtn: ; GFX1250-GISEL: ; %bb.0: -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v8, src_flat_scratch_base_hi -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3] ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, v0, v3 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, 0, v1, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_xor_b32_e32 v0, v7, v8 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, src_flat_scratch_base_hi, v7 bitop3:0x14 ; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1 ; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo @@ -7628,10 +7489,9 @@ define amdgpu_ps <2 x float> @flat_umax_saddr_i64_rtn(ptr inreg %sbase, i32 %vof ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB74_2 ; GFX1250-GISEL-NEXT: .LBB74_4: ; %atomicrmw.private -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7] -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v6, v0 +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v6 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 @@ -7743,13 +7603,12 @@ define amdgpu_ps <2 x float> @flat_umax_saddr_i64_rtn_neg128(ptr inreg %sbase, i ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[0:1], v[0:1] -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) -; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5 -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, src_flat_scratch_base_hi, v5 ; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0 ; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1 ; GFX1250-SDAG-NEXT: s_and_saveexec_b32 s0, vcc_lo +; GFX1250-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB75_3 ; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow @@ -7766,9 +7625,8 @@ define amdgpu_ps <2 x float> @flat_umax_saddr_i64_rtn_neg128(ptr inreg %sbase, i ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB75_2 ; GFX1250-SDAG-NEXT: .LBB75_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5] -; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4 +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v4 ; GFX1250-SDAG-NEXT: 
s_delay_alu instid0(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off @@ -7782,8 +7640,7 @@ define amdgpu_ps <2 x float> @flat_umax_saddr_i64_rtn_neg128(ptr inreg %sbase, i ; ; GFX1250-GISEL-LABEL: flat_umax_saddr_i64_rtn_neg128: ; GFX1250-GISEL: ; %bb.0: -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v8, src_flat_scratch_base_hi -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3] ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v0, vcc_lo, v0, v3 @@ -7792,7 +7649,7 @@ define amdgpu_ps <2 x float> @flat_umax_saddr_i64_rtn_neg128(ptr inreg %sbase, i ; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, 0xffffff80, v0 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, -1, v1, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_xor_b32_e32 v0, v7, v8 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, src_flat_scratch_base_hi, v7 bitop3:0x14 ; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1 ; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo @@ -7813,10 +7670,9 @@ define amdgpu_ps <2 x float> @flat_umax_saddr_i64_rtn_neg128(ptr inreg %sbase, i ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB75_2 ; GFX1250-GISEL-NEXT: .LBB75_4: ; %atomicrmw.private -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7] -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v6, v0 +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v6 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 @@ -7931,11 +7787,10 @@ define amdgpu_ps void @flat_umax_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 ; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0 -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] -; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1 -; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, src_flat_scratch_base_hi, v1 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4 ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 @@ -7954,9 +7809,8 @@ define amdgpu_ps void @flat_umax_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB76_2 ; GFX1250-SDAG-NEXT: .LBB76_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] -; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0 +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, src_flat_scratch_base_lo, v0 ; GFX1250-SDAG-NEXT: 
s_delay_alu instid0(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off @@ -7967,15 +7821,14 @@ define amdgpu_ps void @flat_umax_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; ; GFX1250-GISEL-LABEL: flat_umax_saddr_i64_nortn: ; GFX1250-GISEL: ; %bb.0: -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v6, src_flat_scratch_base_hi :: v_dual_mov_b32 v4, v1 -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3] ; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, v2, v0 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, v3, v6 +; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, src_flat_scratch_base_hi, v3 ; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1 ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB76_3 @@ -7993,10 +7846,9 @@ define amdgpu_ps void @flat_umax_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB76_2 ; GFX1250-GISEL-NEXT: .LBB76_4: ; %atomicrmw.private -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3] -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v2, v0 +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v2 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 @@ -8091,11 +7943,9 @@ define amdgpu_ps void @flat_umax_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %v ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[0:1], v[0:1] -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) -; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1 ; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, src_flat_scratch_base_hi, v1 ; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4 ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB77_3 @@ -8113,9 +7963,8 @@ define amdgpu_ps void @flat_umax_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %v ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB77_2 ; GFX1250-SDAG-NEXT: .LBB77_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] -; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0 +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, src_flat_scratch_base_lo, v0 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo ; GFX1250-SDAG-NEXT: 
scratch_load_b64 v[0:1], v4, off @@ -8126,8 +7975,7 @@ define amdgpu_ps void @flat_umax_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %v ; ; GFX1250-GISEL-LABEL: flat_umax_saddr_i64_nortn_neg128: ; GFX1250-GISEL: ; %bb.0: -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v6, src_flat_scratch_base_hi :: v_dual_mov_b32 v4, v1 -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3] ; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) @@ -8137,7 +7985,7 @@ define amdgpu_ps void @flat_umax_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %v ; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, 0xffffff80, v1 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, -1, v3, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, v3, v6 +; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, src_flat_scratch_base_hi, v3 ; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1 ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB77_3 @@ -8155,10 +8003,9 @@ define amdgpu_ps void @flat_umax_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %v ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB77_2 ; GFX1250-GISEL-NEXT: .LBB77_4: ; %atomicrmw.private -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3] -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v2, v0 +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v2 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 @@ -8400,11 +8247,10 @@ define amdgpu_ps <2 x float> @flat_umin_saddr_i64_rtn(ptr inreg %sbase, i32 %vof ; GFX1250-SDAG-LABEL: flat_umin_saddr_i64_rtn: ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi ; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[2:3], v[0:1] -; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5 +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, src_flat_scratch_base_hi, v5 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) ; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0 ; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1 @@ -8425,9 +8271,8 @@ define amdgpu_ps <2 x float> @flat_umin_saddr_i64_rtn(ptr inreg %sbase, i32 %vof ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB82_2 ; GFX1250-SDAG-NEXT: .LBB82_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5] -; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4 +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v4 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off @@ -8441,14 +8286,13 @@ define amdgpu_ps <2 x 
float> @flat_umin_saddr_i64_rtn(ptr inreg %sbase, i32 %vof ; ; GFX1250-GISEL-LABEL: flat_umin_saddr_i64_rtn: ; GFX1250-GISEL: ; %bb.0: -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v8, src_flat_scratch_base_hi -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3] ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, v0, v3 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, 0, v1, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_xor_b32_e32 v0, v7, v8 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, src_flat_scratch_base_hi, v7 bitop3:0x14 ; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1 ; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo @@ -8469,10 +8313,9 @@ define amdgpu_ps <2 x float> @flat_umin_saddr_i64_rtn(ptr inreg %sbase, i32 %vof ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB82_2 ; GFX1250-GISEL-NEXT: .LBB82_4: ; %atomicrmw.private -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7] -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v6, v0 +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v6 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 @@ -8584,13 +8427,12 @@ define amdgpu_ps <2 x float> @flat_umin_saddr_i64_rtn_neg128(ptr inreg %sbase, i ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[0:1], v[0:1] -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) -; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5 -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, src_flat_scratch_base_hi, v5 ; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0 ; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1 ; GFX1250-SDAG-NEXT: s_and_saveexec_b32 s0, vcc_lo +; GFX1250-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB83_3 ; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow @@ -8607,9 +8449,8 @@ define amdgpu_ps <2 x float> @flat_umin_saddr_i64_rtn_neg128(ptr inreg %sbase, i ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB83_2 ; GFX1250-SDAG-NEXT: .LBB83_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5] -; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4 +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v4 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-SDAG-NEXT: 
v_cndmask_b32_e32 v4, -1, v0, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off @@ -8623,8 +8464,7 @@ define amdgpu_ps <2 x float> @flat_umin_saddr_i64_rtn_neg128(ptr inreg %sbase, i ; ; GFX1250-GISEL-LABEL: flat_umin_saddr_i64_rtn_neg128: ; GFX1250-GISEL: ; %bb.0: -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v8, src_flat_scratch_base_hi -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3] ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v0, vcc_lo, v0, v3 @@ -8633,7 +8473,7 @@ define amdgpu_ps <2 x float> @flat_umin_saddr_i64_rtn_neg128(ptr inreg %sbase, i ; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, 0xffffff80, v0 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, -1, v1, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_xor_b32_e32 v0, v7, v8 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, src_flat_scratch_base_hi, v7 bitop3:0x14 ; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1 ; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo @@ -8654,10 +8494,9 @@ define amdgpu_ps <2 x float> @flat_umin_saddr_i64_rtn_neg128(ptr inreg %sbase, i ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB83_2 ; GFX1250-GISEL-NEXT: .LBB83_4: ; %atomicrmw.private -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7] -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v6, v0 +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v6 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 @@ -8772,11 +8611,10 @@ define amdgpu_ps void @flat_umin_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 ; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0 -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] -; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1 -; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, src_flat_scratch_base_hi, v1 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4 ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 @@ -8795,9 +8633,8 @@ define amdgpu_ps void @flat_umin_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB84_2 ; GFX1250-SDAG-NEXT: .LBB84_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] -; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0 +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, src_flat_scratch_base_lo, v0 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-SDAG-NEXT: 
v_cndmask_b32_e32 v4, -1, v4, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off @@ -8808,15 +8645,14 @@ define amdgpu_ps void @flat_umin_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; ; GFX1250-GISEL-LABEL: flat_umin_saddr_i64_nortn: ; GFX1250-GISEL: ; %bb.0: -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v6, src_flat_scratch_base_hi :: v_dual_mov_b32 v4, v1 -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3] ; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, v2, v0 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, v3, v6 +; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, src_flat_scratch_base_hi, v3 ; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1 ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB84_3 @@ -8834,10 +8670,9 @@ define amdgpu_ps void @flat_umin_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB84_2 ; GFX1250-GISEL-NEXT: .LBB84_4: ; %atomicrmw.private -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3] -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v2, v0 +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v2 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 @@ -8932,11 +8767,9 @@ define amdgpu_ps void @flat_umin_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %v ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[0:1], v[0:1] -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) -; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1 ; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, src_flat_scratch_base_hi, v1 ; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4 ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB85_3 @@ -8954,9 +8787,8 @@ define amdgpu_ps void @flat_umin_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %v ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB85_2 ; GFX1250-SDAG-NEXT: .LBB85_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] -; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0 +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, src_flat_scratch_base_lo, v0 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off @@ -8967,8 +8799,7 @@ 
define amdgpu_ps void @flat_umin_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %v ; ; GFX1250-GISEL-LABEL: flat_umin_saddr_i64_nortn_neg128: ; GFX1250-GISEL: ; %bb.0: -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v6, src_flat_scratch_base_hi :: v_dual_mov_b32 v4, v1 -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3] ; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) @@ -8978,7 +8809,7 @@ define amdgpu_ps void @flat_umin_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %v ; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, 0xffffff80, v1 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, -1, v3, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, v3, v6 +; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, src_flat_scratch_base_hi, v3 ; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1 ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB85_3 @@ -8996,10 +8827,9 @@ define amdgpu_ps void @flat_umin_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %v ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB85_2 ; GFX1250-GISEL-NEXT: .LBB85_4: ; %atomicrmw.private -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3] -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v2, v0 +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v2 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 @@ -9281,12 +9111,11 @@ define amdgpu_ps <2 x float> @flat_cmpxchg_saddr_i64_rtn(ptr inreg %sbase, i32 % ; GFX1250-SDAG-LABEL: flat_cmpxchg_saddr_i64_rtn: ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v7, v2 :: v_dual_mov_b32 v6, v1 -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v5, v4 ; GFX1250-SDAG-NEXT: v_mov_b32_e32 v4, v3 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[2:3], s[2:3], v[0:1] -; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v3 +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, src_flat_scratch_base_hi, v3 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) ; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0 ; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1 @@ -9311,9 +9140,8 @@ define amdgpu_ps <2 x float> @flat_cmpxchg_saddr_i64_rtn(ptr inreg %sbase, i32 % ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB90_2 ; GFX1250-SDAG-NEXT: .LBB90_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3] -; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v2 +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v2 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v8, -1, v0, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v8, off @@ -9328,15 +9156,14 @@ define 
amdgpu_ps <2 x float> @flat_cmpxchg_saddr_i64_rtn(ptr inreg %sbase, i32 % ; ; GFX1250-GISEL-LABEL: flat_cmpxchg_saddr_i64_rtn: ; GFX1250-GISEL: ; %bb.0: -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v0 :: v_dual_mov_b32 v10, src_flat_scratch_base_hi -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v8, v1 :: v_dual_mov_b32 v9, v2 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v0 :: v_dual_mov_b32 v8, v1 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3] -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v6, v3 :: v_dual_mov_b32 v7, v4 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v9, v2 :: v_dual_mov_b32 v6, v3 ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, v0, v5 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v1, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_xor_b32_e32 v0, v3, v10 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v7, v4 :: v_dual_bitop2_b32 v0, src_flat_scratch_base_hi, v3 bitop3:0x14 ; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1 ; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo @@ -9361,10 +9188,9 @@ define amdgpu_ps <2 x float> @flat_cmpxchg_saddr_i64_rtn(ptr inreg %sbase, i32 % ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB90_2 ; GFX1250-GISEL-NEXT: .LBB90_4: ; %atomicrmw.private -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3] -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v2, v0 +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v2 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v4, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 @@ -9485,13 +9311,12 @@ define amdgpu_ps <2 x float> @flat_cmpxchg_saddr_i64_rtn_neg128(ptr inreg %sbase ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[2:3], s[0:1], v[0:1] -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) -; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v3 -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, src_flat_scratch_base_hi, v3 ; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0 ; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1 ; GFX1250-SDAG-NEXT: s_and_saveexec_b32 s0, vcc_lo +; GFX1250-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB91_3 ; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow @@ -9512,9 +9337,8 @@ define amdgpu_ps <2 x float> @flat_cmpxchg_saddr_i64_rtn_neg128(ptr inreg %sbase ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB91_2 ; GFX1250-SDAG-NEXT: .LBB91_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3] -; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v2 
+; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v2 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v8, -1, v0, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v8, off @@ -9529,10 +9353,9 @@ define amdgpu_ps <2 x float> @flat_cmpxchg_saddr_i64_rtn_neg128(ptr inreg %sbase ; ; GFX1250-GISEL-LABEL: flat_cmpxchg_saddr_i64_rtn_neg128: ; GFX1250-GISEL: ; %bb.0: -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v0 :: v_dual_mov_b32 v10, src_flat_scratch_base_hi -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v8, v1 :: v_dual_mov_b32 v9, v2 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v0 :: v_dual_mov_b32 v8, v1 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3] -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v6, v3 :: v_dual_mov_b32 v7, v4 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v9, v2 :: v_dual_mov_b32 v6, v3 ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v0, vcc_lo, v0, v5 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo @@ -9540,7 +9363,7 @@ define amdgpu_ps <2 x float> @flat_cmpxchg_saddr_i64_rtn_neg128(ptr inreg %sbase ; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, 0xffffff80, v0 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, -1, v1, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_xor_b32_e32 v0, v3, v10 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v7, v4 :: v_dual_bitop2_b32 v0, src_flat_scratch_base_hi, v3 bitop3:0x14 ; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1 ; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo @@ -9565,10 +9388,9 @@ define amdgpu_ps <2 x float> @flat_cmpxchg_saddr_i64_rtn_neg128(ptr inreg %sbase ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB91_2 ; GFX1250-GISEL-NEXT: .LBB91_4: ; %atomicrmw.private -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3] -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v2, v0 +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v2 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v4, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 @@ -9690,13 +9512,12 @@ define amdgpu_ps void @flat_cmpxchg_saddr_i64_nortn(ptr inreg %sbase, i32 %voffs ; GFX1250-SDAG-LABEL: flat_cmpxchg_saddr_i64_nortn: ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v7, v2 :: v_dual_mov_b32 v6, v1 -; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0 -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi -; GFX1250-SDAG-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v4, v3 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v5, v4 +; GFX1250-SDAG-NEXT: v_mov_b32_e32 v4, v3 +; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] -; GFX1250-SDAG-NEXT: v_xor_b32_e32 v2, s0, v1 -; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v2, src_flat_scratch_base_hi, v1 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v2 ; GFX1250-SDAG-NEXT: s_xor_b32 
s0, exec_lo, s0 @@ -9718,9 +9539,8 @@ define amdgpu_ps void @flat_cmpxchg_saddr_i64_nortn(ptr inreg %sbase, i32 %voffs ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB92_2 ; GFX1250-SDAG-NEXT: .LBB92_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] -; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v2, s0, v0 +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v2, src_flat_scratch_base_lo, v0 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v2, -1, v2, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v2, off @@ -9732,15 +9552,15 @@ define amdgpu_ps void @flat_cmpxchg_saddr_i64_nortn(ptr inreg %sbase, i32 %voffs ; ; GFX1250-GISEL-LABEL: flat_cmpxchg_saddr_i64_nortn: ; GFX1250-GISEL: ; %bb.0: -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v9, v2 :: v_dual_mov_b32 v6, v3 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v8, v1 :: v_dual_mov_b32 v9, v2 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v6, v3 :: v_dual_mov_b32 v7, v4 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3] -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, src_flat_scratch_base_hi :: v_dual_mov_b32 v8, v1 ; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, v2, v0 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v7, v4 :: v_dual_bitop2_b32 v1, v3, v5 bitop3:0x14 +; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, src_flat_scratch_base_hi, v3 ; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1 ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB92_3 @@ -9761,10 +9581,9 @@ define amdgpu_ps void @flat_cmpxchg_saddr_i64_nortn(ptr inreg %sbase, i32 %voffs ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB92_2 ; GFX1250-GISEL-NEXT: .LBB92_4: ; %atomicrmw.private -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3] -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v2, v0 +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v2 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 @@ -9869,11 +9688,9 @@ define amdgpu_ps void @flat_cmpxchg_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[0:1], v[0:1] -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) -; GFX1250-SDAG-NEXT: v_xor_b32_e32 v2, s0, v1 ; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v2, src_flat_scratch_base_hi, v1 ; 
GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v2 ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB93_3 @@ -9894,9 +9711,8 @@ define amdgpu_ps void @flat_cmpxchg_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB93_2 ; GFX1250-SDAG-NEXT: .LBB93_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] -; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v2, s0, v0 +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v2, src_flat_scratch_base_lo, v0 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v2, -1, v2, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v2, off @@ -9908,18 +9724,18 @@ define amdgpu_ps void @flat_cmpxchg_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 ; ; GFX1250-GISEL-LABEL: flat_cmpxchg_saddr_i64_nortn_neg128: ; GFX1250-GISEL: ; %bb.0: -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v9, v2 :: v_dual_mov_b32 v6, v3 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v8, v1 :: v_dual_mov_b32 v9, v2 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v6, v3 :: v_dual_mov_b32 v7, v4 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3] -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, src_flat_scratch_base_hi :: v_dual_mov_b32 v8, v1 ; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v1, vcc_lo, v2, v0 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, 0xffffff80, v1 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, -1, v3, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v7, v4 :: v_dual_bitop2_b32 v1, v3, v5 bitop3:0x14 +; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, src_flat_scratch_base_hi, v3 ; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1 ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB93_3 @@ -9940,10 +9756,9 @@ define amdgpu_ps void @flat_cmpxchg_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB93_2 ; GFX1250-GISEL-NEXT: .LBB93_4: ; %atomicrmw.private -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3] -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v2, v0 +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v2 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 @@ -10188,11 +10003,10 @@ define amdgpu_ps <2 x float> @flat_inc_saddr_i64_rtn(ptr inreg %sbase, i32 %voff ; GFX1250-SDAG-LABEL: flat_inc_saddr_i64_rtn: ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi ; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | 
instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[2:3], v[0:1] -; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5 +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, src_flat_scratch_base_hi, v5 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) ; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0 ; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1 @@ -10214,10 +10028,9 @@ define amdgpu_ps <2 x float> @flat_inc_saddr_i64_rtn(ptr inreg %sbase, i32 %voff ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB98_2 ; GFX1250-SDAG-NEXT: .LBB98_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5] ; GFX1250-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0 -; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4 +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v4 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(VALU_DEP_2) ; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v6, off @@ -10233,14 +10046,13 @@ define amdgpu_ps <2 x float> @flat_inc_saddr_i64_rtn(ptr inreg %sbase, i32 %voff ; ; GFX1250-GISEL-LABEL: flat_inc_saddr_i64_rtn: ; GFX1250-GISEL: ; %bb.0: -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v8, src_flat_scratch_base_hi -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3] ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, v0, v3 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, 0, v1, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_xor_b32_e32 v0, v7, v8 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, src_flat_scratch_base_hi, v7 bitop3:0x14 ; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1 ; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo @@ -10262,18 +10074,17 @@ define amdgpu_ps <2 x float> @flat_inc_saddr_i64_rtn(ptr inreg %sbase, i32 %voff ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB98_2 ; GFX1250-GISEL-NEXT: .LBB98_4: ; %atomicrmw.private -; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0 -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7] -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v6, v0 +; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v6 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(VALU_DEP_2) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 ; GFX1250-GISEL-NEXT: v_add_nc_u64_e32 v[2:3], 1, v[0:1] ; GFX1250-GISEL-NEXT: v_cmp_ge_u64_e32 vcc_lo, v[0:1], v[4:5] -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e64 v2, v2, 0, vcc_lo +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_3) ; 
GFX1250-GISEL-NEXT: v_cndmask_b32_e64 v3, v3, 0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_store_b64 v6, v[2:3], off ; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0 @@ -10386,13 +10197,12 @@ define amdgpu_ps <2 x float> @flat_inc_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[0:1], v[0:1] -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) -; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5 -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, src_flat_scratch_base_hi, v5 ; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0 ; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1 ; GFX1250-SDAG-NEXT: s_and_saveexec_b32 s0, vcc_lo +; GFX1250-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB99_3 ; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow @@ -10410,10 +10220,9 @@ define amdgpu_ps <2 x float> @flat_inc_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB99_2 ; GFX1250-SDAG-NEXT: .LBB99_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5] ; GFX1250-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0 -; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4 +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v4 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(VALU_DEP_2) ; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v6, off @@ -10429,8 +10238,7 @@ define amdgpu_ps <2 x float> @flat_inc_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; ; GFX1250-GISEL-LABEL: flat_inc_saddr_i64_rtn_neg128: ; GFX1250-GISEL: ; %bb.0: -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v8, src_flat_scratch_base_hi -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3] ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v0, vcc_lo, v0, v3 @@ -10439,7 +10247,7 @@ define amdgpu_ps <2 x float> @flat_inc_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, 0xffffff80, v0 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, -1, v1, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_xor_b32_e32 v0, v7, v8 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, src_flat_scratch_base_hi, v7 bitop3:0x14 ; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1 ; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo @@ -10461,18 +10269,17 @@ define amdgpu_ps <2 x float> @flat_inc_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB99_2 ; GFX1250-GISEL-NEXT: .LBB99_4: ; %atomicrmw.private -; 
GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0 -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7] -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v6, v0 +; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v6 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(VALU_DEP_2) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 ; GFX1250-GISEL-NEXT: v_add_nc_u64_e32 v[2:3], 1, v[0:1] ; GFX1250-GISEL-NEXT: v_cmp_ge_u64_e32 vcc_lo, v[0:1], v[4:5] -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e64 v2, v2, 0, vcc_lo +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_3) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e64 v3, v3, 0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_store_b64 v6, v[2:3], off ; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0 @@ -10588,11 +10395,10 @@ define amdgpu_ps void @flat_inc_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 ; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0 -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] -; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1 -; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, src_flat_scratch_base_hi, v1 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4 ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 @@ -10610,9 +10416,8 @@ define amdgpu_ps void @flat_inc_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB100_2 ; GFX1250-SDAG-NEXT: .LBB100_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] -; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0 +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, src_flat_scratch_base_lo, v0 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(VALU_DEP_2) ; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v6, -1, v4, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v6, off @@ -10625,15 +10430,14 @@ define amdgpu_ps void @flat_inc_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; ; GFX1250-GISEL-LABEL: flat_inc_saddr_i64_nortn: ; GFX1250-GISEL: ; %bb.0: -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v6, src_flat_scratch_base_hi :: v_dual_mov_b32 v4, v1 -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3] ; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, v2, v0 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, v3, v6 +; GFX1250-GISEL-NEXT: 
v_xor_b32_e32 v1, src_flat_scratch_base_hi, v3 ; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1 ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB100_3 @@ -10650,17 +10454,16 @@ define amdgpu_ps void @flat_inc_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB100_2 ; GFX1250-GISEL-NEXT: .LBB100_4: ; %atomicrmw.private -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3] -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v2, v0 +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v2 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(VALU_DEP_2) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 ; GFX1250-GISEL-NEXT: v_add_nc_u64_e32 v[2:3], 1, v[0:1] ; GFX1250-GISEL-NEXT: v_cmp_ge_u64_e32 vcc_lo, v[0:1], v[4:5] -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e64 v0, v2, 0, vcc_lo +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_3) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e64 v1, v3, 0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_store_b64 v6, v[0:1], off ; GFX1250-GISEL-NEXT: s_endpgm @@ -10754,11 +10557,9 @@ define amdgpu_ps void @flat_inc_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[0:1], v[0:1] -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) -; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1 ; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, src_flat_scratch_base_hi, v1 ; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4 ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB101_3 @@ -10775,9 +10576,8 @@ define amdgpu_ps void @flat_inc_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB101_2 ; GFX1250-SDAG-NEXT: .LBB101_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] -; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0 +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, src_flat_scratch_base_lo, v0 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(VALU_DEP_2) ; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v6, -1, v4, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v6, off @@ -10790,8 +10590,7 @@ define amdgpu_ps void @flat_inc_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; ; GFX1250-GISEL-LABEL: flat_inc_saddr_i64_nortn_neg128: ; GFX1250-GISEL: ; %bb.0: -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v6, src_flat_scratch_base_hi :: v_dual_mov_b32 v4, v1 -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 ; 
GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3] ; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) @@ -10801,7 +10600,7 @@ define amdgpu_ps void @flat_inc_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, 0xffffff80, v1 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, -1, v3, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, v3, v6 +; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, src_flat_scratch_base_hi, v3 ; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1 ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB101_3 @@ -10818,17 +10617,16 @@ define amdgpu_ps void @flat_inc_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB101_2 ; GFX1250-GISEL-NEXT: .LBB101_4: ; %atomicrmw.private -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3] -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v2, v0 +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v2 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(VALU_DEP_2) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 ; GFX1250-GISEL-NEXT: v_add_nc_u64_e32 v[2:3], 1, v[0:1] ; GFX1250-GISEL-NEXT: v_cmp_ge_u64_e32 vcc_lo, v[0:1], v[4:5] -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e64 v0, v2, 0, vcc_lo +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_3) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e64 v1, v3, 0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_store_b64 v6, v[0:1], off ; GFX1250-GISEL-NEXT: s_endpgm @@ -11064,11 +10862,10 @@ define amdgpu_ps <2 x float> @flat_dec_saddr_i64_rtn(ptr inreg %sbase, i32 %voff ; GFX1250-SDAG-LABEL: flat_dec_saddr_i64_rtn: ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi ; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[2:3], v[0:1] -; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5 +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, src_flat_scratch_base_hi, v5 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) ; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0 ; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1 @@ -11090,10 +10887,9 @@ define amdgpu_ps <2 x float> @flat_dec_saddr_i64_rtn(ptr inreg %sbase, i32 %voff ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s1, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB106_2 ; GFX1250-SDAG-NEXT: .LBB106_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5] ; GFX1250-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0 -; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s0, v4 +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v4 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) ; 
GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v6, off @@ -11112,14 +10908,13 @@ define amdgpu_ps <2 x float> @flat_dec_saddr_i64_rtn(ptr inreg %sbase, i32 %voff ; ; GFX1250-GISEL-LABEL: flat_dec_saddr_i64_rtn: ; GFX1250-GISEL: ; %bb.0: -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v8, src_flat_scratch_base_hi -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3] ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, v0, v3 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, 0, v1, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_xor_b32_e32 v0, v7, v8 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, src_flat_scratch_base_hi, v7 bitop3:0x14 ; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1 ; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo @@ -11141,11 +10936,10 @@ define amdgpu_ps <2 x float> @flat_dec_saddr_i64_rtn(ptr inreg %sbase, i32 %voff ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s1, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB106_2 ; GFX1250-GISEL-NEXT: .LBB106_4: ; %atomicrmw.private -; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0 -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7] -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v6, v0 +; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v6 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 @@ -11268,13 +11062,12 @@ define amdgpu_ps <2 x float> @flat_dec_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[0:1], v[0:1] -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) -; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5 -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, src_flat_scratch_base_hi, v5 ; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0 ; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1 ; GFX1250-SDAG-NEXT: s_and_saveexec_b32 s0, vcc_lo +; GFX1250-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB107_3 ; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow @@ -11292,10 +11085,9 @@ define amdgpu_ps <2 x float> @flat_dec_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s1, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB107_2 ; GFX1250-SDAG-NEXT: .LBB107_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; 
GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5] ; GFX1250-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0 -; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s0, v4 +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v4 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v6, off @@ -11314,8 +11106,7 @@ define amdgpu_ps <2 x float> @flat_dec_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; ; GFX1250-GISEL-LABEL: flat_dec_saddr_i64_rtn_neg128: ; GFX1250-GISEL: ; %bb.0: -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v8, src_flat_scratch_base_hi -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3] ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v0, vcc_lo, v0, v3 @@ -11324,7 +11115,7 @@ define amdgpu_ps <2 x float> @flat_dec_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, 0xffffff80, v0 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, -1, v1, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_xor_b32_e32 v0, v7, v8 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, src_flat_scratch_base_hi, v7 bitop3:0x14 ; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1 ; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo @@ -11346,11 +11137,10 @@ define amdgpu_ps <2 x float> @flat_dec_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s1, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB107_2 ; GFX1250-GISEL-NEXT: .LBB107_4: ; %atomicrmw.private -; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0 -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7] -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v6, v0 +; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v6 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 @@ -11476,11 +11266,10 @@ define amdgpu_ps void @flat_dec_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 ; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0 -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] -; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1 -; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, src_flat_scratch_base_hi, v1 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4 ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 @@ -11498,9 +11287,8 @@ define amdgpu_ps void @flat_dec_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; 
GFX1250-SDAG-NEXT: s_cbranch_execz .LBB108_2 ; GFX1250-SDAG-NEXT: .LBB108_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] -; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0 +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, src_flat_scratch_base_lo, v0 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off @@ -11516,15 +11304,14 @@ define amdgpu_ps void @flat_dec_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; ; GFX1250-GISEL-LABEL: flat_dec_saddr_i64_nortn: ; GFX1250-GISEL: ; %bb.0: -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v6, src_flat_scratch_base_hi :: v_dual_mov_b32 v4, v1 -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3] ; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, v2, v0 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, v3, v6 +; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, src_flat_scratch_base_hi, v3 ; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1 ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB108_3 @@ -11541,10 +11328,9 @@ define amdgpu_ps void @flat_dec_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB108_2 ; GFX1250-GISEL-NEXT: .LBB108_4: ; %atomicrmw.private -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3] -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v2, v0 +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v2 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 @@ -11648,11 +11434,9 @@ define amdgpu_ps void @flat_dec_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[0:1], v[0:1] -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) -; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1 ; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, src_flat_scratch_base_hi, v1 ; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4 ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB109_3 @@ -11669,9 +11453,8 @@ define amdgpu_ps void @flat_dec_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB109_2 ; GFX1250-SDAG-NEXT: .LBB109_4: ; %atomicrmw.private -; 
GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] -; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0 +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, src_flat_scratch_base_lo, v0 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off @@ -11687,8 +11470,7 @@ define amdgpu_ps void @flat_dec_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; ; GFX1250-GISEL-LABEL: flat_dec_saddr_i64_nortn_neg128: ; GFX1250-GISEL: ; %bb.0: -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v6, src_flat_scratch_base_hi :: v_dual_mov_b32 v4, v1 -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3] ; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) @@ -11698,7 +11480,7 @@ define amdgpu_ps void @flat_dec_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, 0xffffff80, v1 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, -1, v3, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, v3, v6 +; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, src_flat_scratch_base_hi, v3 ; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1 ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB109_3 @@ -11715,10 +11497,9 @@ define amdgpu_ps void @flat_dec_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB109_2 ; GFX1250-GISEL-NEXT: .LBB109_4: ; %atomicrmw.private -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3] -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v2, v0 +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v2 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 @@ -11834,12 +11615,10 @@ define double @flat_atomic_fadd_f64_saddr_rtn(ptr inreg %ptr, double %data) { ; GFX1250-SDAG-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2 ; GFX1250-SDAG-NEXT: s_cbranch_vccz .LBB110_3 ; GFX1250-SDAG-NEXT: ; %bb.1: ; %atomicrmw.check.private -; GFX1250-SDAG-NEXT: s_mov_b32 s2, src_flat_scratch_base_hi -; GFX1250-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) -; GFX1250-SDAG-NEXT: s_xor_b32 s2, s1, s2 +; GFX1250-SDAG-NEXT: s_xor_b32 s2, s1, src_flat_scratch_base_hi +; GFX1250-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) ; GFX1250-SDAG-NEXT: s_cmp_lt_u32 s2, 0x4000000 ; GFX1250-SDAG-NEXT: s_cselect_b32 s2, -1, 0 -; GFX1250-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1250-SDAG-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2 ; GFX1250-SDAG-NEXT: s_cbranch_vccz .LBB110_4 ; GFX1250-SDAG-NEXT: ; %bb.2: ; %atomicrmw.global @@ -11855,9 +11634,7 @@ define double @flat_atomic_fadd_f64_saddr_rtn(ptr inreg %ptr, double %data) { ; GFX1250-SDAG-NEXT: .LBB110_4: ; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr2_vgpr3 ; GFX1250-SDAG-NEXT: .LBB110_5: ; 
%atomicrmw.private -; GFX1250-SDAG-NEXT: s_mov_b32 s2, src_flat_scratch_base_lo -; GFX1250-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; GFX1250-SDAG-NEXT: s_sub_co_i32 s2, s0, s2 +; GFX1250-SDAG-NEXT: s_sub_co_i32 s2, s0, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: s_cmp_lg_u64 s[0:1], 0 ; GFX1250-SDAG-NEXT: s_cselect_b32 s2, s2, -1 ; GFX1250-SDAG-NEXT: scratch_load_b64 v[2:3], off, s2 @@ -11891,10 +11668,9 @@ define double @flat_atomic_fadd_f64_saddr_rtn(ptr inreg %ptr, double %data) { ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2_vgpr3 ; GFX1250-GISEL-NEXT: s_cbranch_scc0 .LBB110_6 ; GFX1250-GISEL-NEXT: ; %bb.1: ; %atomicrmw.check.private -; GFX1250-GISEL-NEXT: s_mov_b32 s2, src_flat_scratch_base_hi +; GFX1250-GISEL-NEXT: s_xor_b32 s2, s1, src_flat_scratch_base_hi ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2_vgpr3 -; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) -; GFX1250-GISEL-NEXT: s_xor_b32 s2, s1, s2 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1250-GISEL-NEXT: s_cmp_ge_u32 s2, 0x4000000 ; GFX1250-GISEL-NEXT: s_mov_b32 s2, 1 ; GFX1250-GISEL-NEXT: s_cbranch_scc0 .LBB110_3 @@ -11910,9 +11686,7 @@ define double @flat_atomic_fadd_f64_saddr_rtn(ptr inreg %ptr, double %data) { ; GFX1250-GISEL-NEXT: s_cmp_lg_u32 s2, 0 ; GFX1250-GISEL-NEXT: s_cbranch_scc1 .LBB110_5 ; GFX1250-GISEL-NEXT: ; %bb.4: ; %atomicrmw.private -; GFX1250-GISEL-NEXT: s_mov_b32 s2, src_flat_scratch_base_lo -; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; GFX1250-GISEL-NEXT: s_sub_co_i32 s2, s0, s2 +; GFX1250-GISEL-NEXT: s_sub_co_i32 s2, s0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: s_cmp_lg_u64 s[0:1], 0 ; GFX1250-GISEL-NEXT: s_cselect_b32 s2, s2, -1 ; GFX1250-GISEL-NEXT: scratch_load_b64 v[2:3], off, s2 @@ -12060,12 +11834,10 @@ define void @flat_atomic_fadd_f64_saddr_nortn(ptr inreg %ptr, double %data) { ; GFX1250-SDAG-NEXT: .LBB111_2: ; %atomicrmw.phi ; GFX1250-SDAG-NEXT: s_set_pc_i64 s[30:31] ; GFX1250-SDAG-NEXT: .LBB111_3: ; %atomicrmw.check.private -; GFX1250-SDAG-NEXT: s_mov_b32 s2, src_flat_scratch_base_hi -; GFX1250-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) -; GFX1250-SDAG-NEXT: s_xor_b32 s2, s1, s2 +; GFX1250-SDAG-NEXT: s_xor_b32 s2, s1, src_flat_scratch_base_hi +; GFX1250-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) ; GFX1250-SDAG-NEXT: s_cmp_lt_u32 s2, 0x4000000 ; GFX1250-SDAG-NEXT: s_cselect_b32 s2, -1, 0 -; GFX1250-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1250-SDAG-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2 ; GFX1250-SDAG-NEXT: s_mov_b32 s2, -1 ; GFX1250-SDAG-NEXT: s_cbranch_vccz .LBB111_5 @@ -12079,9 +11851,7 @@ define void @flat_atomic_fadd_f64_saddr_nortn(ptr inreg %ptr, double %data) { ; GFX1250-SDAG-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2 ; GFX1250-SDAG-NEXT: s_cbranch_vccnz .LBB111_7 ; GFX1250-SDAG-NEXT: ; %bb.6: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_mov_b32 s2, src_flat_scratch_base_lo -; GFX1250-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; GFX1250-SDAG-NEXT: s_sub_co_i32 s2, s0, s2 +; GFX1250-SDAG-NEXT: s_sub_co_i32 s2, s0, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: s_cmp_lg_u64 s[0:1], 0 ; GFX1250-SDAG-NEXT: s_cselect_b32 s2, s2, -1 ; GFX1250-SDAG-NEXT: scratch_load_b64 v[2:3], off, s2 @@ -12112,9 +11882,8 @@ define void @flat_atomic_fadd_f64_saddr_nortn(ptr inreg %ptr, double %data) { ; GFX1250-GISEL-NEXT: s_cmp_lg_u32 s1, s3 ; GFX1250-GISEL-NEXT: s_cbranch_scc0 .LBB111_6 ; GFX1250-GISEL-NEXT: ; %bb.1: ; 
%atomicrmw.check.private -; GFX1250-GISEL-NEXT: s_mov_b32 s2, src_flat_scratch_base_hi -; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) -; GFX1250-GISEL-NEXT: s_xor_b32 s2, s1, s2 +; GFX1250-GISEL-NEXT: s_xor_b32 s2, s1, src_flat_scratch_base_hi +; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1250-GISEL-NEXT: s_cmp_ge_u32 s2, 0x4000000 ; GFX1250-GISEL-NEXT: s_mov_b32 s2, 1 ; GFX1250-GISEL-NEXT: s_cbranch_scc0 .LBB111_3 @@ -12130,9 +11899,7 @@ define void @flat_atomic_fadd_f64_saddr_nortn(ptr inreg %ptr, double %data) { ; GFX1250-GISEL-NEXT: s_cmp_lg_u32 s2, 0 ; GFX1250-GISEL-NEXT: s_cbranch_scc1 .LBB111_5 ; GFX1250-GISEL-NEXT: ; %bb.4: ; %atomicrmw.private -; GFX1250-GISEL-NEXT: s_mov_b32 s2, src_flat_scratch_base_lo -; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; GFX1250-GISEL-NEXT: s_sub_co_i32 s2, s0, s2 +; GFX1250-GISEL-NEXT: s_sub_co_i32 s2, s0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: s_cmp_lg_u64 s[0:1], 0 ; GFX1250-GISEL-NEXT: s_cselect_b32 s2, s2, -1 ; GFX1250-GISEL-NEXT: scratch_load_b64 v[2:3], off, s2 @@ -12261,9 +12028,8 @@ define double @flat_atomic_fmax_f64_saddr_rtn(ptr inreg %ptr, double %data) { ; GFX1250-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0 ; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0 ; GFX1250-SDAG-NEXT: s_add_nc_u64 s[0:1], s[0:1], 0x50 -; GFX1250-SDAG-NEXT: s_mov_b32 s2, src_flat_scratch_base_hi ; GFX1250-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) -; GFX1250-SDAG-NEXT: s_xor_b32 s2, s1, s2 +; GFX1250-SDAG-NEXT: s_xor_b32 s2, s1, src_flat_scratch_base_hi ; GFX1250-SDAG-NEXT: s_cmp_lt_u32 s2, 0x4000000 ; GFX1250-SDAG-NEXT: s_cselect_b32 s2, -1, 0 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) @@ -12279,10 +12045,9 @@ define double @flat_atomic_fmax_f64_saddr_rtn(ptr inreg %ptr, double %data) { ; GFX1250-SDAG-NEXT: .LBB112_2: ; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr2_vgpr3 ; GFX1250-SDAG-NEXT: .LBB112_3: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_mov_b32 s2, src_flat_scratch_base_lo -; GFX1250-SDAG-NEXT: v_max_num_f64_e32 v[0:1], v[0:1], v[0:1] -; GFX1250-SDAG-NEXT: s_sub_co_i32 s2, s0, s2 +; GFX1250-SDAG-NEXT: s_sub_co_i32 s2, s0, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: s_cmp_lg_u64 s[0:1], 0 +; GFX1250-SDAG-NEXT: v_max_num_f64_e32 v[0:1], v[0:1], v[0:1] ; GFX1250-SDAG-NEXT: s_cselect_b32 s0, s2, -1 ; GFX1250-SDAG-NEXT: scratch_load_b64 v[2:3], off, s0 ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 @@ -12301,10 +12066,9 @@ define double @flat_atomic_fmax_f64_saddr_rtn(ptr inreg %ptr, double %data) { ; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0 ; GFX1250-GISEL-NEXT: s_add_co_u32 s2, s0, 0x50 ; GFX1250-GISEL-NEXT: s_add_co_ci_u32 s3, s1, 0 -; GFX1250-GISEL-NEXT: s_mov_b32 s4, src_flat_scratch_base_hi ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2_vgpr3 ; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) -; GFX1250-GISEL-NEXT: s_xor_b32 s4, s3, s4 +; GFX1250-GISEL-NEXT: s_xor_b32 s4, s3, src_flat_scratch_base_hi ; GFX1250-GISEL-NEXT: s_cmp_ge_u32 s4, 0x4000000 ; GFX1250-GISEL-NEXT: s_mov_b32 s4, 1 ; GFX1250-GISEL-NEXT: s_cbranch_scc0 .LBB112_2 @@ -12320,10 +12084,9 @@ define double @flat_atomic_fmax_f64_saddr_rtn(ptr inreg %ptr, double %data) { ; GFX1250-GISEL-NEXT: s_cmp_lg_u32 s0, 0 ; GFX1250-GISEL-NEXT: s_cbranch_scc1 .LBB112_4 ; GFX1250-GISEL-NEXT: ; %bb.3: ; %atomicrmw.private -; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo -; GFX1250-GISEL-NEXT: v_max_num_f64_e32 v[0:1], v[0:1], v[0:1] -; 
GFX1250-GISEL-NEXT: s_sub_co_i32 s0, s2, s0 +; GFX1250-GISEL-NEXT: s_sub_co_i32 s0, s2, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: s_cmp_lg_u64 s[2:3], 0 +; GFX1250-GISEL-NEXT: v_max_num_f64_e32 v[0:1], v[0:1], v[0:1] ; GFX1250-GISEL-NEXT: s_cselect_b32 s0, s0, -1 ; GFX1250-GISEL-NEXT: scratch_load_b64 v[2:3], off, s0 ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 @@ -12413,9 +12176,8 @@ define void @flat_atomic_fmax_f64_saddr_nortn(ptr inreg %ptr, double %data) { ; GFX1250-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0 ; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0 ; GFX1250-SDAG-NEXT: s_add_nc_u64 s[0:1], s[0:1], 0x50 -; GFX1250-SDAG-NEXT: s_mov_b32 s2, src_flat_scratch_base_hi ; GFX1250-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) -; GFX1250-SDAG-NEXT: s_xor_b32 s2, s1, s2 +; GFX1250-SDAG-NEXT: s_xor_b32 s2, s1, src_flat_scratch_base_hi ; GFX1250-SDAG-NEXT: s_cmp_lt_u32 s2, 0x4000000 ; GFX1250-SDAG-NEXT: s_cselect_b32 s2, -1, 0 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) @@ -12434,11 +12196,10 @@ define void @flat_atomic_fmax_f64_saddr_nortn(ptr inreg %ptr, double %data) { ; GFX1250-SDAG-NEXT: s_wait_storecnt_dscnt 0x0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB113_2 ; GFX1250-SDAG-NEXT: .LBB113_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_mov_b32 s2, src_flat_scratch_base_lo +; GFX1250-SDAG-NEXT: s_sub_co_i32 s2, s0, src_flat_scratch_base_lo +; GFX1250-SDAG-NEXT: s_cmp_lg_u64 s[0:1], 0 ; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0 ; GFX1250-SDAG-NEXT: v_max_num_f64_e32 v[0:1], v[0:1], v[0:1] -; GFX1250-SDAG-NEXT: s_sub_co_i32 s2, s0, s2 -; GFX1250-SDAG-NEXT: s_cmp_lg_u64 s[0:1], 0 ; GFX1250-SDAG-NEXT: s_cselect_b32 s0, s2, -1 ; GFX1250-SDAG-NEXT: scratch_load_b64 v[2:3], off, s0 ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 @@ -12454,9 +12215,8 @@ define void @flat_atomic_fmax_f64_saddr_nortn(ptr inreg %ptr, double %data) { ; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0 ; GFX1250-GISEL-NEXT: s_add_co_u32 s2, s0, 0x50 ; GFX1250-GISEL-NEXT: s_add_co_ci_u32 s3, s1, 0 -; GFX1250-GISEL-NEXT: s_mov_b32 s4, src_flat_scratch_base_hi ; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) -; GFX1250-GISEL-NEXT: s_xor_b32 s4, s3, s4 +; GFX1250-GISEL-NEXT: s_xor_b32 s4, s3, src_flat_scratch_base_hi ; GFX1250-GISEL-NEXT: s_cmp_ge_u32 s4, 0x4000000 ; GFX1250-GISEL-NEXT: s_mov_b32 s4, 1 ; GFX1250-GISEL-NEXT: s_cbranch_scc0 .LBB113_2 @@ -12473,10 +12233,9 @@ define void @flat_atomic_fmax_f64_saddr_nortn(ptr inreg %ptr, double %data) { ; GFX1250-GISEL-NEXT: s_cmp_lg_u32 s0, 0 ; GFX1250-GISEL-NEXT: s_cbranch_scc1 .LBB113_4 ; GFX1250-GISEL-NEXT: ; %bb.3: ; %atomicrmw.private -; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo -; GFX1250-GISEL-NEXT: v_max_num_f64_e32 v[0:1], v[0:1], v[0:1] -; GFX1250-GISEL-NEXT: s_sub_co_i32 s0, s2, s0 +; GFX1250-GISEL-NEXT: s_sub_co_i32 s0, s2, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: s_cmp_lg_u64 s[2:3], 0 +; GFX1250-GISEL-NEXT: v_max_num_f64_e32 v[0:1], v[0:1], v[0:1] ; GFX1250-GISEL-NEXT: s_cselect_b32 s0, s0, -1 ; GFX1250-GISEL-NEXT: scratch_load_b64 v[2:3], off, s0 ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 @@ -12562,9 +12321,8 @@ define double @flat_atomic_fmin_f64_saddr_rtn(ptr inreg %ptr, double %data) { ; GFX1250-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0 ; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0 ; GFX1250-SDAG-NEXT: s_add_nc_u64 s[0:1], s[0:1], 0x50 -; GFX1250-SDAG-NEXT: s_mov_b32 s2, src_flat_scratch_base_hi ; GFX1250-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) -; 
GFX1250-SDAG-NEXT: s_xor_b32 s2, s1, s2 +; GFX1250-SDAG-NEXT: s_xor_b32 s2, s1, src_flat_scratch_base_hi ; GFX1250-SDAG-NEXT: s_cmp_lt_u32 s2, 0x4000000 ; GFX1250-SDAG-NEXT: s_cselect_b32 s2, -1, 0 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) @@ -12580,10 +12338,9 @@ define double @flat_atomic_fmin_f64_saddr_rtn(ptr inreg %ptr, double %data) { ; GFX1250-SDAG-NEXT: .LBB114_2: ; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr2_vgpr3 ; GFX1250-SDAG-NEXT: .LBB114_3: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_mov_b32 s2, src_flat_scratch_base_lo -; GFX1250-SDAG-NEXT: v_max_num_f64_e32 v[0:1], v[0:1], v[0:1] -; GFX1250-SDAG-NEXT: s_sub_co_i32 s2, s0, s2 +; GFX1250-SDAG-NEXT: s_sub_co_i32 s2, s0, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: s_cmp_lg_u64 s[0:1], 0 +; GFX1250-SDAG-NEXT: v_max_num_f64_e32 v[0:1], v[0:1], v[0:1] ; GFX1250-SDAG-NEXT: s_cselect_b32 s0, s2, -1 ; GFX1250-SDAG-NEXT: scratch_load_b64 v[2:3], off, s0 ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 @@ -12602,10 +12359,9 @@ define double @flat_atomic_fmin_f64_saddr_rtn(ptr inreg %ptr, double %data) { ; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0 ; GFX1250-GISEL-NEXT: s_add_co_u32 s2, s0, 0x50 ; GFX1250-GISEL-NEXT: s_add_co_ci_u32 s3, s1, 0 -; GFX1250-GISEL-NEXT: s_mov_b32 s4, src_flat_scratch_base_hi ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2_vgpr3 ; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) -; GFX1250-GISEL-NEXT: s_xor_b32 s4, s3, s4 +; GFX1250-GISEL-NEXT: s_xor_b32 s4, s3, src_flat_scratch_base_hi ; GFX1250-GISEL-NEXT: s_cmp_ge_u32 s4, 0x4000000 ; GFX1250-GISEL-NEXT: s_mov_b32 s4, 1 ; GFX1250-GISEL-NEXT: s_cbranch_scc0 .LBB114_2 @@ -12621,10 +12377,9 @@ define double @flat_atomic_fmin_f64_saddr_rtn(ptr inreg %ptr, double %data) { ; GFX1250-GISEL-NEXT: s_cmp_lg_u32 s0, 0 ; GFX1250-GISEL-NEXT: s_cbranch_scc1 .LBB114_4 ; GFX1250-GISEL-NEXT: ; %bb.3: ; %atomicrmw.private -; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo -; GFX1250-GISEL-NEXT: v_max_num_f64_e32 v[0:1], v[0:1], v[0:1] -; GFX1250-GISEL-NEXT: s_sub_co_i32 s0, s2, s0 +; GFX1250-GISEL-NEXT: s_sub_co_i32 s0, s2, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: s_cmp_lg_u64 s[2:3], 0 +; GFX1250-GISEL-NEXT: v_max_num_f64_e32 v[0:1], v[0:1], v[0:1] ; GFX1250-GISEL-NEXT: s_cselect_b32 s0, s0, -1 ; GFX1250-GISEL-NEXT: scratch_load_b64 v[2:3], off, s0 ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 @@ -12714,9 +12469,8 @@ define void @flat_atomic_fmin_f64_saddr_nortn(ptr inreg %ptr, double %data) { ; GFX1250-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0 ; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0 ; GFX1250-SDAG-NEXT: s_add_nc_u64 s[0:1], s[0:1], 0x50 -; GFX1250-SDAG-NEXT: s_mov_b32 s2, src_flat_scratch_base_hi ; GFX1250-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) -; GFX1250-SDAG-NEXT: s_xor_b32 s2, s1, s2 +; GFX1250-SDAG-NEXT: s_xor_b32 s2, s1, src_flat_scratch_base_hi ; GFX1250-SDAG-NEXT: s_cmp_lt_u32 s2, 0x4000000 ; GFX1250-SDAG-NEXT: s_cselect_b32 s2, -1, 0 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) @@ -12735,11 +12489,10 @@ define void @flat_atomic_fmin_f64_saddr_nortn(ptr inreg %ptr, double %data) { ; GFX1250-SDAG-NEXT: s_wait_storecnt_dscnt 0x0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB115_2 ; GFX1250-SDAG-NEXT: .LBB115_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_mov_b32 s2, src_flat_scratch_base_lo +; GFX1250-SDAG-NEXT: s_sub_co_i32 s2, s0, src_flat_scratch_base_lo +; GFX1250-SDAG-NEXT: s_cmp_lg_u64 s[0:1], 0 ; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0 ; GFX1250-SDAG-NEXT: 
v_max_num_f64_e32 v[0:1], v[0:1], v[0:1] -; GFX1250-SDAG-NEXT: s_sub_co_i32 s2, s0, s2 -; GFX1250-SDAG-NEXT: s_cmp_lg_u64 s[0:1], 0 ; GFX1250-SDAG-NEXT: s_cselect_b32 s0, s2, -1 ; GFX1250-SDAG-NEXT: scratch_load_b64 v[2:3], off, s0 ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 @@ -12755,9 +12508,8 @@ define void @flat_atomic_fmin_f64_saddr_nortn(ptr inreg %ptr, double %data) { ; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0 ; GFX1250-GISEL-NEXT: s_add_co_u32 s2, s0, 0x50 ; GFX1250-GISEL-NEXT: s_add_co_ci_u32 s3, s1, 0 -; GFX1250-GISEL-NEXT: s_mov_b32 s4, src_flat_scratch_base_hi ; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) -; GFX1250-GISEL-NEXT: s_xor_b32 s4, s3, s4 +; GFX1250-GISEL-NEXT: s_xor_b32 s4, s3, src_flat_scratch_base_hi ; GFX1250-GISEL-NEXT: s_cmp_ge_u32 s4, 0x4000000 ; GFX1250-GISEL-NEXT: s_mov_b32 s4, 1 ; GFX1250-GISEL-NEXT: s_cbranch_scc0 .LBB115_2 @@ -12774,10 +12526,9 @@ define void @flat_atomic_fmin_f64_saddr_nortn(ptr inreg %ptr, double %data) { ; GFX1250-GISEL-NEXT: s_cmp_lg_u32 s0, 0 ; GFX1250-GISEL-NEXT: s_cbranch_scc1 .LBB115_4 ; GFX1250-GISEL-NEXT: ; %bb.3: ; %atomicrmw.private -; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo -; GFX1250-GISEL-NEXT: v_max_num_f64_e32 v[0:1], v[0:1], v[0:1] -; GFX1250-GISEL-NEXT: s_sub_co_i32 s0, s2, s0 +; GFX1250-GISEL-NEXT: s_sub_co_i32 s0, s2, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: s_cmp_lg_u64 s[2:3], 0 +; GFX1250-GISEL-NEXT: v_max_num_f64_e32 v[0:1], v[0:1], v[0:1] ; GFX1250-GISEL-NEXT: s_cselect_b32 s0, s0, -1 ; GFX1250-GISEL-NEXT: scratch_load_b64 v[2:3], off, s0 ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 diff --git a/llvm/test/CodeGen/AMDGPU/flat-saddr-load.ll b/llvm/test/CodeGen/AMDGPU/flat-saddr-load.ll index 2079543..b5b2655 100644 --- a/llvm/test/CodeGen/AMDGPU/flat-saddr-load.ll +++ b/llvm/test/CodeGen/AMDGPU/flat-saddr-load.ll @@ -2179,6 +2179,7 @@ define amdgpu_ps void @flat_addr_64bit_lsr_iv_multiload(ptr inreg %arg, ptr inre ; GFX1250-SDAG-NEXT: flat_load_b32 v1, v0, s[4:5] scope:SCOPE_SYS ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 ; GFX1250-SDAG-NEXT: s_cmp_eq_u32 s0, 0x400 +; GFX1250-SDAG-NEXT: ; kill: killed $sgpr4_sgpr5 ; GFX1250-SDAG-NEXT: s_cbranch_scc0 .LBB117_1 ; GFX1250-SDAG-NEXT: ; %bb.2: ; %bb2 ; GFX1250-SDAG-NEXT: s_endpgm @@ -2190,15 +2191,16 @@ define amdgpu_ps void @flat_addr_64bit_lsr_iv_multiload(ptr inreg %arg, ptr inre ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[0:1] ; GFX1250-GISEL-NEXT: .LBB117_1: ; %bb3 ; GFX1250-GISEL-NEXT: ; =>This Inner Loop Header: Depth=1 -; GFX1250-GISEL-NEXT: s_wait_dscnt 0x0 ; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0 ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v4, vcc_lo, v0, v2 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v5, null, v1, v3, vcc_lo ; GFX1250-GISEL-NEXT: v_add_nc_u64_e32 v[2:3], 4, v[2:3] +; GFX1250-GISEL-NEXT: ; kill: killed $vgpr4 killed $vgpr5 +; GFX1250-GISEL-NEXT: s_wait_dscnt 0x0 +; GFX1250-GISEL-NEXT: flat_load_b32 v6, v[4:5] scope:SCOPE_SYS +; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0 ; GFX1250-GISEL-NEXT: flat_load_b32 v6, v[4:5] scope:SCOPE_SYS -; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 -; GFX1250-GISEL-NEXT: flat_load_b32 v4, v[4:5] scope:SCOPE_SYS ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 ; GFX1250-GISEL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0x400, v2 ; GFX1250-GISEL-NEXT: s_cbranch_vccz .LBB117_1 diff --git a/llvm/test/CodeGen/AMDGPU/fmax3.ll b/llvm/test/CodeGen/AMDGPU/fmax3.ll index 4827f75..5e6de6d 100644 --- 
a/llvm/test/CodeGen/AMDGPU/fmax3.ll +++ b/llvm/test/CodeGen/AMDGPU/fmax3.ll @@ -162,32 +162,32 @@ define amdgpu_kernel void @test_fmax3_olt_0_f32(ptr addrspace(1) %out, ptr addrs ; ; GFX1250-LABEL: test_fmax3_olt_0_f32: ; GFX1250: ; %bb.0: -; GFX1250-NEXT: s_load_b256 s[0:7], s[4:5], 0x24 -; GFX1250-NEXT: s_mov_b32 s10, -1 -; GFX1250-NEXT: s_mov_b32 s11, 0x31016000 -; GFX1250-NEXT: s_mov_b32 s14, s10 -; GFX1250-NEXT: s_mov_b32 s15, s11 -; GFX1250-NEXT: s_mov_b32 s18, s10 -; GFX1250-NEXT: s_mov_b32 s19, s11 -; GFX1250-NEXT: s_mov_b32 s22, s10 -; GFX1250-NEXT: s_mov_b32 s23, s11 +; GFX1250-NEXT: s_load_b256 s[8:15], s[4:5], 0x24 +; GFX1250-NEXT: s_mov_b32 s2, -1 +; GFX1250-NEXT: s_mov_b32 s3, 0x31016000 +; GFX1250-NEXT: s_mov_b32 s6, s2 +; GFX1250-NEXT: s_mov_b32 s7, s3 +; GFX1250-NEXT: s_mov_b32 s18, s2 +; GFX1250-NEXT: s_mov_b32 s19, s3 +; GFX1250-NEXT: s_mov_b32 s22, s2 +; GFX1250-NEXT: s_mov_b32 s23, s3 ; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: s_mov_b32 s12, s2 -; GFX1250-NEXT: s_mov_b32 s13, s3 -; GFX1250-NEXT: s_mov_b32 s16, s4 -; GFX1250-NEXT: s_mov_b32 s17, s5 -; GFX1250-NEXT: s_mov_b32 s20, s6 -; GFX1250-NEXT: s_mov_b32 s21, s7 -; GFX1250-NEXT: buffer_load_b32 v0, off, s[12:15], null scope:SCOPE_SYS +; GFX1250-NEXT: s_mov_b32 s4, s10 +; GFX1250-NEXT: s_mov_b32 s5, s11 +; GFX1250-NEXT: s_mov_b32 s16, s12 +; GFX1250-NEXT: s_mov_b32 s17, s13 +; GFX1250-NEXT: s_mov_b32 s20, s14 +; GFX1250-NEXT: s_mov_b32 s21, s15 +; GFX1250-NEXT: buffer_load_b32 v0, off, s[4:7], null scope:SCOPE_SYS ; GFX1250-NEXT: s_wait_loadcnt 0x0 ; GFX1250-NEXT: buffer_load_b32 v1, off, s[16:19], null scope:SCOPE_SYS ; GFX1250-NEXT: s_wait_loadcnt 0x0 ; GFX1250-NEXT: buffer_load_b32 v2, off, s[20:23], null scope:SCOPE_SYS ; GFX1250-NEXT: s_wait_loadcnt 0x0 -; GFX1250-NEXT: s_mov_b32 s8, s0 -; GFX1250-NEXT: s_mov_b32 s9, s1 +; GFX1250-NEXT: s_mov_b32 s0, s8 +; GFX1250-NEXT: s_mov_b32 s1, s9 ; GFX1250-NEXT: v_max3_num_f32 v0, v0, v1, v2 -; GFX1250-NEXT: buffer_store_b32 v0, off, s[8:11], null +; GFX1250-NEXT: buffer_store_b32 v0, off, s[0:3], null ; GFX1250-NEXT: s_endpgm %a = load volatile float, ptr addrspace(1) %aptr, align 4 %b = load volatile float, ptr addrspace(1) %bptr, align 4 @@ -352,32 +352,32 @@ define amdgpu_kernel void @test_fmax3_olt_1_f32(ptr addrspace(1) %out, ptr addrs ; ; GFX1250-LABEL: test_fmax3_olt_1_f32: ; GFX1250: ; %bb.0: -; GFX1250-NEXT: s_load_b256 s[0:7], s[4:5], 0x24 -; GFX1250-NEXT: s_mov_b32 s10, -1 -; GFX1250-NEXT: s_mov_b32 s11, 0x31016000 -; GFX1250-NEXT: s_mov_b32 s14, s10 -; GFX1250-NEXT: s_mov_b32 s15, s11 -; GFX1250-NEXT: s_mov_b32 s18, s10 -; GFX1250-NEXT: s_mov_b32 s19, s11 -; GFX1250-NEXT: s_mov_b32 s22, s10 -; GFX1250-NEXT: s_mov_b32 s23, s11 +; GFX1250-NEXT: s_load_b256 s[8:15], s[4:5], 0x24 +; GFX1250-NEXT: s_mov_b32 s2, -1 +; GFX1250-NEXT: s_mov_b32 s3, 0x31016000 +; GFX1250-NEXT: s_mov_b32 s6, s2 +; GFX1250-NEXT: s_mov_b32 s7, s3 +; GFX1250-NEXT: s_mov_b32 s18, s2 +; GFX1250-NEXT: s_mov_b32 s19, s3 +; GFX1250-NEXT: s_mov_b32 s22, s2 +; GFX1250-NEXT: s_mov_b32 s23, s3 ; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: s_mov_b32 s12, s2 -; GFX1250-NEXT: s_mov_b32 s13, s3 -; GFX1250-NEXT: s_mov_b32 s16, s4 -; GFX1250-NEXT: s_mov_b32 s17, s5 -; GFX1250-NEXT: s_mov_b32 s20, s6 -; GFX1250-NEXT: s_mov_b32 s21, s7 -; GFX1250-NEXT: buffer_load_b32 v0, off, s[12:15], null scope:SCOPE_SYS +; GFX1250-NEXT: s_mov_b32 s4, s10 +; GFX1250-NEXT: s_mov_b32 s5, s11 +; GFX1250-NEXT: s_mov_b32 s16, s12 +; GFX1250-NEXT: s_mov_b32 s17, s13 +; GFX1250-NEXT: s_mov_b32 s20, s14 +; 
GFX1250-NEXT: s_mov_b32 s21, s15 +; GFX1250-NEXT: buffer_load_b32 v0, off, s[4:7], null scope:SCOPE_SYS ; GFX1250-NEXT: s_wait_loadcnt 0x0 ; GFX1250-NEXT: buffer_load_b32 v1, off, s[16:19], null scope:SCOPE_SYS ; GFX1250-NEXT: s_wait_loadcnt 0x0 ; GFX1250-NEXT: buffer_load_b32 v2, off, s[20:23], null scope:SCOPE_SYS ; GFX1250-NEXT: s_wait_loadcnt 0x0 -; GFX1250-NEXT: s_mov_b32 s8, s0 -; GFX1250-NEXT: s_mov_b32 s9, s1 +; GFX1250-NEXT: s_mov_b32 s0, s8 +; GFX1250-NEXT: s_mov_b32 s1, s9 ; GFX1250-NEXT: v_max3_num_f32 v0, v2, v0, v1 -; GFX1250-NEXT: buffer_store_b32 v0, off, s[8:11], null +; GFX1250-NEXT: buffer_store_b32 v0, off, s[0:3], null ; GFX1250-NEXT: s_endpgm %a = load volatile float, ptr addrspace(1) %aptr, align 4 %b = load volatile float, ptr addrspace(1) %bptr, align 4 @@ -609,62 +609,62 @@ define amdgpu_kernel void @test_fmax3_olt_0_f16(ptr addrspace(1) %out, ptr addrs ; ; GFX1250-TRUE16-LABEL: test_fmax3_olt_0_f16: ; GFX1250-TRUE16: ; %bb.0: -; GFX1250-TRUE16-NEXT: s_load_b256 s[0:7], s[4:5], 0x24 -; GFX1250-TRUE16-NEXT: s_mov_b32 s10, -1 -; GFX1250-TRUE16-NEXT: s_mov_b32 s11, 0x31016000 -; GFX1250-TRUE16-NEXT: s_mov_b32 s14, s10 -; GFX1250-TRUE16-NEXT: s_mov_b32 s15, s11 -; GFX1250-TRUE16-NEXT: s_mov_b32 s18, s10 -; GFX1250-TRUE16-NEXT: s_mov_b32 s19, s11 -; GFX1250-TRUE16-NEXT: s_mov_b32 s22, s10 -; GFX1250-TRUE16-NEXT: s_mov_b32 s23, s11 +; GFX1250-TRUE16-NEXT: s_load_b256 s[8:15], s[4:5], 0x24 +; GFX1250-TRUE16-NEXT: s_mov_b32 s2, -1 +; GFX1250-TRUE16-NEXT: s_mov_b32 s3, 0x31016000 +; GFX1250-TRUE16-NEXT: s_mov_b32 s6, s2 +; GFX1250-TRUE16-NEXT: s_mov_b32 s7, s3 +; GFX1250-TRUE16-NEXT: s_mov_b32 s18, s2 +; GFX1250-TRUE16-NEXT: s_mov_b32 s19, s3 +; GFX1250-TRUE16-NEXT: s_mov_b32 s22, s2 +; GFX1250-TRUE16-NEXT: s_mov_b32 s23, s3 ; GFX1250-TRUE16-NEXT: s_wait_kmcnt 0x0 -; GFX1250-TRUE16-NEXT: s_mov_b32 s12, s2 -; GFX1250-TRUE16-NEXT: s_mov_b32 s13, s3 -; GFX1250-TRUE16-NEXT: s_mov_b32 s16, s4 -; GFX1250-TRUE16-NEXT: s_mov_b32 s17, s5 -; GFX1250-TRUE16-NEXT: s_mov_b32 s20, s6 -; GFX1250-TRUE16-NEXT: s_mov_b32 s21, s7 -; GFX1250-TRUE16-NEXT: buffer_load_u16 v0, off, s[12:15], null scope:SCOPE_SYS +; GFX1250-TRUE16-NEXT: s_mov_b32 s4, s10 +; GFX1250-TRUE16-NEXT: s_mov_b32 s5, s11 +; GFX1250-TRUE16-NEXT: s_mov_b32 s16, s12 +; GFX1250-TRUE16-NEXT: s_mov_b32 s17, s13 +; GFX1250-TRUE16-NEXT: s_mov_b32 s20, s14 +; GFX1250-TRUE16-NEXT: s_mov_b32 s21, s15 +; GFX1250-TRUE16-NEXT: buffer_load_u16 v0, off, s[4:7], null scope:SCOPE_SYS ; GFX1250-TRUE16-NEXT: s_wait_loadcnt 0x0 ; GFX1250-TRUE16-NEXT: buffer_load_u16 v1, off, s[16:19], null scope:SCOPE_SYS ; GFX1250-TRUE16-NEXT: s_wait_loadcnt 0x0 ; GFX1250-TRUE16-NEXT: buffer_load_u16 v2, off, s[20:23], null scope:SCOPE_SYS ; GFX1250-TRUE16-NEXT: s_wait_loadcnt 0x0 -; GFX1250-TRUE16-NEXT: s_mov_b32 s8, s0 -; GFX1250-TRUE16-NEXT: s_mov_b32 s9, s1 +; GFX1250-TRUE16-NEXT: s_mov_b32 s0, s8 +; GFX1250-TRUE16-NEXT: s_mov_b32 s1, s9 ; GFX1250-TRUE16-NEXT: v_max3_num_f16 v0.l, v0.l, v1.l, v2.l -; GFX1250-TRUE16-NEXT: buffer_store_b16 v0, off, s[8:11], null +; GFX1250-TRUE16-NEXT: buffer_store_b16 v0, off, s[0:3], null ; GFX1250-TRUE16-NEXT: s_endpgm ; ; GFX1250-FAKE16-LABEL: test_fmax3_olt_0_f16: ; GFX1250-FAKE16: ; %bb.0: -; GFX1250-FAKE16-NEXT: s_load_b256 s[0:7], s[4:5], 0x24 -; GFX1250-FAKE16-NEXT: s_mov_b32 s10, -1 -; GFX1250-FAKE16-NEXT: s_mov_b32 s11, 0x31016000 -; GFX1250-FAKE16-NEXT: s_mov_b32 s14, s10 -; GFX1250-FAKE16-NEXT: s_mov_b32 s15, s11 -; GFX1250-FAKE16-NEXT: s_mov_b32 s18, s10 -; GFX1250-FAKE16-NEXT: s_mov_b32 s19, s11 -; 
GFX1250-FAKE16-NEXT: s_mov_b32 s22, s10 -; GFX1250-FAKE16-NEXT: s_mov_b32 s23, s11 +; GFX1250-FAKE16-NEXT: s_load_b256 s[8:15], s[4:5], 0x24 +; GFX1250-FAKE16-NEXT: s_mov_b32 s2, -1 +; GFX1250-FAKE16-NEXT: s_mov_b32 s3, 0x31016000 +; GFX1250-FAKE16-NEXT: s_mov_b32 s6, s2 +; GFX1250-FAKE16-NEXT: s_mov_b32 s7, s3 +; GFX1250-FAKE16-NEXT: s_mov_b32 s18, s2 +; GFX1250-FAKE16-NEXT: s_mov_b32 s19, s3 +; GFX1250-FAKE16-NEXT: s_mov_b32 s22, s2 +; GFX1250-FAKE16-NEXT: s_mov_b32 s23, s3 ; GFX1250-FAKE16-NEXT: s_wait_kmcnt 0x0 -; GFX1250-FAKE16-NEXT: s_mov_b32 s12, s2 -; GFX1250-FAKE16-NEXT: s_mov_b32 s13, s3 -; GFX1250-FAKE16-NEXT: s_mov_b32 s16, s4 -; GFX1250-FAKE16-NEXT: s_mov_b32 s17, s5 -; GFX1250-FAKE16-NEXT: s_mov_b32 s20, s6 -; GFX1250-FAKE16-NEXT: s_mov_b32 s21, s7 -; GFX1250-FAKE16-NEXT: buffer_load_u16 v0, off, s[12:15], null scope:SCOPE_SYS +; GFX1250-FAKE16-NEXT: s_mov_b32 s4, s10 +; GFX1250-FAKE16-NEXT: s_mov_b32 s5, s11 +; GFX1250-FAKE16-NEXT: s_mov_b32 s16, s12 +; GFX1250-FAKE16-NEXT: s_mov_b32 s17, s13 +; GFX1250-FAKE16-NEXT: s_mov_b32 s20, s14 +; GFX1250-FAKE16-NEXT: s_mov_b32 s21, s15 +; GFX1250-FAKE16-NEXT: buffer_load_u16 v0, off, s[4:7], null scope:SCOPE_SYS ; GFX1250-FAKE16-NEXT: s_wait_loadcnt 0x0 ; GFX1250-FAKE16-NEXT: buffer_load_u16 v1, off, s[16:19], null scope:SCOPE_SYS ; GFX1250-FAKE16-NEXT: s_wait_loadcnt 0x0 ; GFX1250-FAKE16-NEXT: buffer_load_u16 v2, off, s[20:23], null scope:SCOPE_SYS ; GFX1250-FAKE16-NEXT: s_wait_loadcnt 0x0 -; GFX1250-FAKE16-NEXT: s_mov_b32 s8, s0 -; GFX1250-FAKE16-NEXT: s_mov_b32 s9, s1 +; GFX1250-FAKE16-NEXT: s_mov_b32 s0, s8 +; GFX1250-FAKE16-NEXT: s_mov_b32 s1, s9 ; GFX1250-FAKE16-NEXT: v_max3_num_f16 v0, v0, v1, v2 -; GFX1250-FAKE16-NEXT: buffer_store_b16 v0, off, s[8:11], null +; GFX1250-FAKE16-NEXT: buffer_store_b16 v0, off, s[0:3], null ; GFX1250-FAKE16-NEXT: s_endpgm %a = load volatile half, ptr addrspace(1) %aptr, align 2 %b = load volatile half, ptr addrspace(1) %bptr, align 2 @@ -897,62 +897,62 @@ define amdgpu_kernel void @test_fmax3_olt_1_f16(ptr addrspace(1) %out, ptr addrs ; ; GFX1250-TRUE16-LABEL: test_fmax3_olt_1_f16: ; GFX1250-TRUE16: ; %bb.0: -; GFX1250-TRUE16-NEXT: s_load_b256 s[0:7], s[4:5], 0x24 -; GFX1250-TRUE16-NEXT: s_mov_b32 s10, -1 -; GFX1250-TRUE16-NEXT: s_mov_b32 s11, 0x31016000 -; GFX1250-TRUE16-NEXT: s_mov_b32 s14, s10 -; GFX1250-TRUE16-NEXT: s_mov_b32 s15, s11 -; GFX1250-TRUE16-NEXT: s_mov_b32 s18, s10 -; GFX1250-TRUE16-NEXT: s_mov_b32 s19, s11 -; GFX1250-TRUE16-NEXT: s_mov_b32 s22, s10 -; GFX1250-TRUE16-NEXT: s_mov_b32 s23, s11 +; GFX1250-TRUE16-NEXT: s_load_b256 s[8:15], s[4:5], 0x24 +; GFX1250-TRUE16-NEXT: s_mov_b32 s2, -1 +; GFX1250-TRUE16-NEXT: s_mov_b32 s3, 0x31016000 +; GFX1250-TRUE16-NEXT: s_mov_b32 s6, s2 +; GFX1250-TRUE16-NEXT: s_mov_b32 s7, s3 +; GFX1250-TRUE16-NEXT: s_mov_b32 s18, s2 +; GFX1250-TRUE16-NEXT: s_mov_b32 s19, s3 +; GFX1250-TRUE16-NEXT: s_mov_b32 s22, s2 +; GFX1250-TRUE16-NEXT: s_mov_b32 s23, s3 ; GFX1250-TRUE16-NEXT: s_wait_kmcnt 0x0 -; GFX1250-TRUE16-NEXT: s_mov_b32 s12, s2 -; GFX1250-TRUE16-NEXT: s_mov_b32 s13, s3 -; GFX1250-TRUE16-NEXT: s_mov_b32 s16, s4 -; GFX1250-TRUE16-NEXT: s_mov_b32 s17, s5 -; GFX1250-TRUE16-NEXT: s_mov_b32 s20, s6 -; GFX1250-TRUE16-NEXT: s_mov_b32 s21, s7 -; GFX1250-TRUE16-NEXT: buffer_load_u16 v1, off, s[12:15], null scope:SCOPE_SYS +; GFX1250-TRUE16-NEXT: s_mov_b32 s4, s10 +; GFX1250-TRUE16-NEXT: s_mov_b32 s5, s11 +; GFX1250-TRUE16-NEXT: s_mov_b32 s16, s12 +; GFX1250-TRUE16-NEXT: s_mov_b32 s17, s13 +; GFX1250-TRUE16-NEXT: s_mov_b32 s20, s14 +; GFX1250-TRUE16-NEXT: 
s_mov_b32 s21, s15 +; GFX1250-TRUE16-NEXT: buffer_load_u16 v1, off, s[4:7], null scope:SCOPE_SYS ; GFX1250-TRUE16-NEXT: s_wait_loadcnt 0x0 ; GFX1250-TRUE16-NEXT: buffer_load_u16 v2, off, s[16:19], null scope:SCOPE_SYS ; GFX1250-TRUE16-NEXT: s_wait_loadcnt 0x0 ; GFX1250-TRUE16-NEXT: buffer_load_u16 v0, off, s[20:23], null scope:SCOPE_SYS ; GFX1250-TRUE16-NEXT: s_wait_loadcnt 0x0 -; GFX1250-TRUE16-NEXT: s_mov_b32 s8, s0 -; GFX1250-TRUE16-NEXT: s_mov_b32 s9, s1 +; GFX1250-TRUE16-NEXT: s_mov_b32 s0, s8 +; GFX1250-TRUE16-NEXT: s_mov_b32 s1, s9 ; GFX1250-TRUE16-NEXT: v_max3_num_f16 v0.l, v0.l, v1.l, v2.l -; GFX1250-TRUE16-NEXT: buffer_store_b16 v0, off, s[8:11], null +; GFX1250-TRUE16-NEXT: buffer_store_b16 v0, off, s[0:3], null ; GFX1250-TRUE16-NEXT: s_endpgm ; ; GFX1250-FAKE16-LABEL: test_fmax3_olt_1_f16: ; GFX1250-FAKE16: ; %bb.0: -; GFX1250-FAKE16-NEXT: s_load_b256 s[0:7], s[4:5], 0x24 -; GFX1250-FAKE16-NEXT: s_mov_b32 s10, -1 -; GFX1250-FAKE16-NEXT: s_mov_b32 s11, 0x31016000 -; GFX1250-FAKE16-NEXT: s_mov_b32 s14, s10 -; GFX1250-FAKE16-NEXT: s_mov_b32 s15, s11 -; GFX1250-FAKE16-NEXT: s_mov_b32 s18, s10 -; GFX1250-FAKE16-NEXT: s_mov_b32 s19, s11 -; GFX1250-FAKE16-NEXT: s_mov_b32 s22, s10 -; GFX1250-FAKE16-NEXT: s_mov_b32 s23, s11 +; GFX1250-FAKE16-NEXT: s_load_b256 s[8:15], s[4:5], 0x24 +; GFX1250-FAKE16-NEXT: s_mov_b32 s2, -1 +; GFX1250-FAKE16-NEXT: s_mov_b32 s3, 0x31016000 +; GFX1250-FAKE16-NEXT: s_mov_b32 s6, s2 +; GFX1250-FAKE16-NEXT: s_mov_b32 s7, s3 +; GFX1250-FAKE16-NEXT: s_mov_b32 s18, s2 +; GFX1250-FAKE16-NEXT: s_mov_b32 s19, s3 +; GFX1250-FAKE16-NEXT: s_mov_b32 s22, s2 +; GFX1250-FAKE16-NEXT: s_mov_b32 s23, s3 ; GFX1250-FAKE16-NEXT: s_wait_kmcnt 0x0 -; GFX1250-FAKE16-NEXT: s_mov_b32 s12, s2 -; GFX1250-FAKE16-NEXT: s_mov_b32 s13, s3 -; GFX1250-FAKE16-NEXT: s_mov_b32 s16, s4 -; GFX1250-FAKE16-NEXT: s_mov_b32 s17, s5 -; GFX1250-FAKE16-NEXT: s_mov_b32 s20, s6 -; GFX1250-FAKE16-NEXT: s_mov_b32 s21, s7 -; GFX1250-FAKE16-NEXT: buffer_load_u16 v0, off, s[12:15], null scope:SCOPE_SYS +; GFX1250-FAKE16-NEXT: s_mov_b32 s4, s10 +; GFX1250-FAKE16-NEXT: s_mov_b32 s5, s11 +; GFX1250-FAKE16-NEXT: s_mov_b32 s16, s12 +; GFX1250-FAKE16-NEXT: s_mov_b32 s17, s13 +; GFX1250-FAKE16-NEXT: s_mov_b32 s20, s14 +; GFX1250-FAKE16-NEXT: s_mov_b32 s21, s15 +; GFX1250-FAKE16-NEXT: buffer_load_u16 v0, off, s[4:7], null scope:SCOPE_SYS ; GFX1250-FAKE16-NEXT: s_wait_loadcnt 0x0 ; GFX1250-FAKE16-NEXT: buffer_load_u16 v1, off, s[16:19], null scope:SCOPE_SYS ; GFX1250-FAKE16-NEXT: s_wait_loadcnt 0x0 ; GFX1250-FAKE16-NEXT: buffer_load_u16 v2, off, s[20:23], null scope:SCOPE_SYS ; GFX1250-FAKE16-NEXT: s_wait_loadcnt 0x0 -; GFX1250-FAKE16-NEXT: s_mov_b32 s8, s0 -; GFX1250-FAKE16-NEXT: s_mov_b32 s9, s1 +; GFX1250-FAKE16-NEXT: s_mov_b32 s0, s8 +; GFX1250-FAKE16-NEXT: s_mov_b32 s1, s9 ; GFX1250-FAKE16-NEXT: v_max3_num_f16 v0, v2, v0, v1 -; GFX1250-FAKE16-NEXT: buffer_store_b16 v0, off, s[8:11], null +; GFX1250-FAKE16-NEXT: buffer_store_b16 v0, off, s[0:3], null ; GFX1250-FAKE16-NEXT: s_endpgm %a = load volatile half, ptr addrspace(1) %aptr, align 2 %b = load volatile half, ptr addrspace(1) %bptr, align 2 diff --git a/llvm/test/CodeGen/AMDGPU/fmin3.ll b/llvm/test/CodeGen/AMDGPU/fmin3.ll index 6dfefd8..6a6f232 100644 --- a/llvm/test/CodeGen/AMDGPU/fmin3.ll +++ b/llvm/test/CodeGen/AMDGPU/fmin3.ll @@ -162,32 +162,32 @@ define amdgpu_kernel void @test_fmin3_olt_0_f32(ptr addrspace(1) %out, ptr addrs ; ; GFX1250-LABEL: test_fmin3_olt_0_f32: ; GFX1250: ; %bb.0: -; GFX1250-NEXT: s_load_b256 s[0:7], s[4:5], 0x24 -; GFX1250-NEXT: 
s_mov_b32 s10, -1 -; GFX1250-NEXT: s_mov_b32 s11, 0x31016000 -; GFX1250-NEXT: s_mov_b32 s14, s10 -; GFX1250-NEXT: s_mov_b32 s15, s11 -; GFX1250-NEXT: s_mov_b32 s18, s10 -; GFX1250-NEXT: s_mov_b32 s19, s11 -; GFX1250-NEXT: s_mov_b32 s22, s10 -; GFX1250-NEXT: s_mov_b32 s23, s11 +; GFX1250-NEXT: s_load_b256 s[8:15], s[4:5], 0x24 +; GFX1250-NEXT: s_mov_b32 s2, -1 +; GFX1250-NEXT: s_mov_b32 s3, 0x31016000 +; GFX1250-NEXT: s_mov_b32 s6, s2 +; GFX1250-NEXT: s_mov_b32 s7, s3 +; GFX1250-NEXT: s_mov_b32 s18, s2 +; GFX1250-NEXT: s_mov_b32 s19, s3 +; GFX1250-NEXT: s_mov_b32 s22, s2 +; GFX1250-NEXT: s_mov_b32 s23, s3 ; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: s_mov_b32 s12, s2 -; GFX1250-NEXT: s_mov_b32 s13, s3 -; GFX1250-NEXT: s_mov_b32 s16, s4 -; GFX1250-NEXT: s_mov_b32 s17, s5 -; GFX1250-NEXT: s_mov_b32 s20, s6 -; GFX1250-NEXT: s_mov_b32 s21, s7 -; GFX1250-NEXT: buffer_load_b32 v0, off, s[12:15], null scope:SCOPE_SYS +; GFX1250-NEXT: s_mov_b32 s4, s10 +; GFX1250-NEXT: s_mov_b32 s5, s11 +; GFX1250-NEXT: s_mov_b32 s16, s12 +; GFX1250-NEXT: s_mov_b32 s17, s13 +; GFX1250-NEXT: s_mov_b32 s20, s14 +; GFX1250-NEXT: s_mov_b32 s21, s15 +; GFX1250-NEXT: buffer_load_b32 v0, off, s[4:7], null scope:SCOPE_SYS ; GFX1250-NEXT: s_wait_loadcnt 0x0 ; GFX1250-NEXT: buffer_load_b32 v1, off, s[16:19], null scope:SCOPE_SYS ; GFX1250-NEXT: s_wait_loadcnt 0x0 ; GFX1250-NEXT: buffer_load_b32 v2, off, s[20:23], null scope:SCOPE_SYS ; GFX1250-NEXT: s_wait_loadcnt 0x0 -; GFX1250-NEXT: s_mov_b32 s8, s0 -; GFX1250-NEXT: s_mov_b32 s9, s1 +; GFX1250-NEXT: s_mov_b32 s0, s8 +; GFX1250-NEXT: s_mov_b32 s1, s9 ; GFX1250-NEXT: v_min3_num_f32 v0, v0, v1, v2 -; GFX1250-NEXT: buffer_store_b32 v0, off, s[8:11], null +; GFX1250-NEXT: buffer_store_b32 v0, off, s[0:3], null ; GFX1250-NEXT: s_endpgm %a = load volatile float, ptr addrspace(1) %aptr, align 4 %b = load volatile float, ptr addrspace(1) %bptr, align 4 @@ -352,32 +352,32 @@ define amdgpu_kernel void @test_fmin3_olt_1_f32(ptr addrspace(1) %out, ptr addrs ; ; GFX1250-LABEL: test_fmin3_olt_1_f32: ; GFX1250: ; %bb.0: -; GFX1250-NEXT: s_load_b256 s[0:7], s[4:5], 0x24 -; GFX1250-NEXT: s_mov_b32 s10, -1 -; GFX1250-NEXT: s_mov_b32 s11, 0x31016000 -; GFX1250-NEXT: s_mov_b32 s14, s10 -; GFX1250-NEXT: s_mov_b32 s15, s11 -; GFX1250-NEXT: s_mov_b32 s18, s10 -; GFX1250-NEXT: s_mov_b32 s19, s11 -; GFX1250-NEXT: s_mov_b32 s22, s10 -; GFX1250-NEXT: s_mov_b32 s23, s11 +; GFX1250-NEXT: s_load_b256 s[8:15], s[4:5], 0x24 +; GFX1250-NEXT: s_mov_b32 s2, -1 +; GFX1250-NEXT: s_mov_b32 s3, 0x31016000 +; GFX1250-NEXT: s_mov_b32 s6, s2 +; GFX1250-NEXT: s_mov_b32 s7, s3 +; GFX1250-NEXT: s_mov_b32 s18, s2 +; GFX1250-NEXT: s_mov_b32 s19, s3 +; GFX1250-NEXT: s_mov_b32 s22, s2 +; GFX1250-NEXT: s_mov_b32 s23, s3 ; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: s_mov_b32 s12, s2 -; GFX1250-NEXT: s_mov_b32 s13, s3 -; GFX1250-NEXT: s_mov_b32 s16, s4 -; GFX1250-NEXT: s_mov_b32 s17, s5 -; GFX1250-NEXT: s_mov_b32 s20, s6 -; GFX1250-NEXT: s_mov_b32 s21, s7 -; GFX1250-NEXT: buffer_load_b32 v0, off, s[12:15], null scope:SCOPE_SYS +; GFX1250-NEXT: s_mov_b32 s4, s10 +; GFX1250-NEXT: s_mov_b32 s5, s11 +; GFX1250-NEXT: s_mov_b32 s16, s12 +; GFX1250-NEXT: s_mov_b32 s17, s13 +; GFX1250-NEXT: s_mov_b32 s20, s14 +; GFX1250-NEXT: s_mov_b32 s21, s15 +; GFX1250-NEXT: buffer_load_b32 v0, off, s[4:7], null scope:SCOPE_SYS ; GFX1250-NEXT: s_wait_loadcnt 0x0 ; GFX1250-NEXT: buffer_load_b32 v1, off, s[16:19], null scope:SCOPE_SYS ; GFX1250-NEXT: s_wait_loadcnt 0x0 ; GFX1250-NEXT: buffer_load_b32 v2, off, s[20:23], null 
scope:SCOPE_SYS ; GFX1250-NEXT: s_wait_loadcnt 0x0 -; GFX1250-NEXT: s_mov_b32 s8, s0 -; GFX1250-NEXT: s_mov_b32 s9, s1 +; GFX1250-NEXT: s_mov_b32 s0, s8 +; GFX1250-NEXT: s_mov_b32 s1, s9 ; GFX1250-NEXT: v_min3_num_f32 v0, v2, v0, v1 -; GFX1250-NEXT: buffer_store_b32 v0, off, s[8:11], null +; GFX1250-NEXT: buffer_store_b32 v0, off, s[0:3], null ; GFX1250-NEXT: s_endpgm %a = load volatile float, ptr addrspace(1) %aptr, align 4 %b = load volatile float, ptr addrspace(1) %bptr, align 4 @@ -609,62 +609,62 @@ define amdgpu_kernel void @test_fmin3_olt_0_f16(ptr addrspace(1) %out, ptr addrs ; ; GFX1250-TRUE16-LABEL: test_fmin3_olt_0_f16: ; GFX1250-TRUE16: ; %bb.0: -; GFX1250-TRUE16-NEXT: s_load_b256 s[0:7], s[4:5], 0x24 -; GFX1250-TRUE16-NEXT: s_mov_b32 s10, -1 -; GFX1250-TRUE16-NEXT: s_mov_b32 s11, 0x31016000 -; GFX1250-TRUE16-NEXT: s_mov_b32 s14, s10 -; GFX1250-TRUE16-NEXT: s_mov_b32 s15, s11 -; GFX1250-TRUE16-NEXT: s_mov_b32 s18, s10 -; GFX1250-TRUE16-NEXT: s_mov_b32 s19, s11 -; GFX1250-TRUE16-NEXT: s_mov_b32 s22, s10 -; GFX1250-TRUE16-NEXT: s_mov_b32 s23, s11 +; GFX1250-TRUE16-NEXT: s_load_b256 s[8:15], s[4:5], 0x24 +; GFX1250-TRUE16-NEXT: s_mov_b32 s2, -1 +; GFX1250-TRUE16-NEXT: s_mov_b32 s3, 0x31016000 +; GFX1250-TRUE16-NEXT: s_mov_b32 s6, s2 +; GFX1250-TRUE16-NEXT: s_mov_b32 s7, s3 +; GFX1250-TRUE16-NEXT: s_mov_b32 s18, s2 +; GFX1250-TRUE16-NEXT: s_mov_b32 s19, s3 +; GFX1250-TRUE16-NEXT: s_mov_b32 s22, s2 +; GFX1250-TRUE16-NEXT: s_mov_b32 s23, s3 ; GFX1250-TRUE16-NEXT: s_wait_kmcnt 0x0 -; GFX1250-TRUE16-NEXT: s_mov_b32 s12, s2 -; GFX1250-TRUE16-NEXT: s_mov_b32 s13, s3 -; GFX1250-TRUE16-NEXT: s_mov_b32 s16, s4 -; GFX1250-TRUE16-NEXT: s_mov_b32 s17, s5 -; GFX1250-TRUE16-NEXT: s_mov_b32 s20, s6 -; GFX1250-TRUE16-NEXT: s_mov_b32 s21, s7 -; GFX1250-TRUE16-NEXT: buffer_load_u16 v0, off, s[12:15], null scope:SCOPE_SYS +; GFX1250-TRUE16-NEXT: s_mov_b32 s4, s10 +; GFX1250-TRUE16-NEXT: s_mov_b32 s5, s11 +; GFX1250-TRUE16-NEXT: s_mov_b32 s16, s12 +; GFX1250-TRUE16-NEXT: s_mov_b32 s17, s13 +; GFX1250-TRUE16-NEXT: s_mov_b32 s20, s14 +; GFX1250-TRUE16-NEXT: s_mov_b32 s21, s15 +; GFX1250-TRUE16-NEXT: buffer_load_u16 v0, off, s[4:7], null scope:SCOPE_SYS ; GFX1250-TRUE16-NEXT: s_wait_loadcnt 0x0 ; GFX1250-TRUE16-NEXT: buffer_load_u16 v1, off, s[16:19], null scope:SCOPE_SYS ; GFX1250-TRUE16-NEXT: s_wait_loadcnt 0x0 ; GFX1250-TRUE16-NEXT: buffer_load_u16 v2, off, s[20:23], null scope:SCOPE_SYS ; GFX1250-TRUE16-NEXT: s_wait_loadcnt 0x0 -; GFX1250-TRUE16-NEXT: s_mov_b32 s8, s0 -; GFX1250-TRUE16-NEXT: s_mov_b32 s9, s1 +; GFX1250-TRUE16-NEXT: s_mov_b32 s0, s8 +; GFX1250-TRUE16-NEXT: s_mov_b32 s1, s9 ; GFX1250-TRUE16-NEXT: v_min3_num_f16 v0.l, v0.l, v1.l, v2.l -; GFX1250-TRUE16-NEXT: buffer_store_b16 v0, off, s[8:11], null +; GFX1250-TRUE16-NEXT: buffer_store_b16 v0, off, s[0:3], null ; GFX1250-TRUE16-NEXT: s_endpgm ; ; GFX1250-FAKE16-LABEL: test_fmin3_olt_0_f16: ; GFX1250-FAKE16: ; %bb.0: -; GFX1250-FAKE16-NEXT: s_load_b256 s[0:7], s[4:5], 0x24 -; GFX1250-FAKE16-NEXT: s_mov_b32 s10, -1 -; GFX1250-FAKE16-NEXT: s_mov_b32 s11, 0x31016000 -; GFX1250-FAKE16-NEXT: s_mov_b32 s14, s10 -; GFX1250-FAKE16-NEXT: s_mov_b32 s15, s11 -; GFX1250-FAKE16-NEXT: s_mov_b32 s18, s10 -; GFX1250-FAKE16-NEXT: s_mov_b32 s19, s11 -; GFX1250-FAKE16-NEXT: s_mov_b32 s22, s10 -; GFX1250-FAKE16-NEXT: s_mov_b32 s23, s11 +; GFX1250-FAKE16-NEXT: s_load_b256 s[8:15], s[4:5], 0x24 +; GFX1250-FAKE16-NEXT: s_mov_b32 s2, -1 +; GFX1250-FAKE16-NEXT: s_mov_b32 s3, 0x31016000 +; GFX1250-FAKE16-NEXT: s_mov_b32 s6, s2 +; GFX1250-FAKE16-NEXT: s_mov_b32 s7, 
s3 +; GFX1250-FAKE16-NEXT: s_mov_b32 s18, s2 +; GFX1250-FAKE16-NEXT: s_mov_b32 s19, s3 +; GFX1250-FAKE16-NEXT: s_mov_b32 s22, s2 +; GFX1250-FAKE16-NEXT: s_mov_b32 s23, s3 ; GFX1250-FAKE16-NEXT: s_wait_kmcnt 0x0 -; GFX1250-FAKE16-NEXT: s_mov_b32 s12, s2 -; GFX1250-FAKE16-NEXT: s_mov_b32 s13, s3 -; GFX1250-FAKE16-NEXT: s_mov_b32 s16, s4 -; GFX1250-FAKE16-NEXT: s_mov_b32 s17, s5 -; GFX1250-FAKE16-NEXT: s_mov_b32 s20, s6 -; GFX1250-FAKE16-NEXT: s_mov_b32 s21, s7 -; GFX1250-FAKE16-NEXT: buffer_load_u16 v0, off, s[12:15], null scope:SCOPE_SYS +; GFX1250-FAKE16-NEXT: s_mov_b32 s4, s10 +; GFX1250-FAKE16-NEXT: s_mov_b32 s5, s11 +; GFX1250-FAKE16-NEXT: s_mov_b32 s16, s12 +; GFX1250-FAKE16-NEXT: s_mov_b32 s17, s13 +; GFX1250-FAKE16-NEXT: s_mov_b32 s20, s14 +; GFX1250-FAKE16-NEXT: s_mov_b32 s21, s15 +; GFX1250-FAKE16-NEXT: buffer_load_u16 v0, off, s[4:7], null scope:SCOPE_SYS ; GFX1250-FAKE16-NEXT: s_wait_loadcnt 0x0 ; GFX1250-FAKE16-NEXT: buffer_load_u16 v1, off, s[16:19], null scope:SCOPE_SYS ; GFX1250-FAKE16-NEXT: s_wait_loadcnt 0x0 ; GFX1250-FAKE16-NEXT: buffer_load_u16 v2, off, s[20:23], null scope:SCOPE_SYS ; GFX1250-FAKE16-NEXT: s_wait_loadcnt 0x0 -; GFX1250-FAKE16-NEXT: s_mov_b32 s8, s0 -; GFX1250-FAKE16-NEXT: s_mov_b32 s9, s1 +; GFX1250-FAKE16-NEXT: s_mov_b32 s0, s8 +; GFX1250-FAKE16-NEXT: s_mov_b32 s1, s9 ; GFX1250-FAKE16-NEXT: v_min3_num_f16 v0, v0, v1, v2 -; GFX1250-FAKE16-NEXT: buffer_store_b16 v0, off, s[8:11], null +; GFX1250-FAKE16-NEXT: buffer_store_b16 v0, off, s[0:3], null ; GFX1250-FAKE16-NEXT: s_endpgm %a = load volatile half, ptr addrspace(1) %aptr, align 2 %b = load volatile half, ptr addrspace(1) %bptr, align 2 @@ -897,62 +897,62 @@ define amdgpu_kernel void @test_fmin3_olt_1_f16(ptr addrspace(1) %out, ptr addrs ; ; GFX1250-TRUE16-LABEL: test_fmin3_olt_1_f16: ; GFX1250-TRUE16: ; %bb.0: -; GFX1250-TRUE16-NEXT: s_load_b256 s[0:7], s[4:5], 0x24 -; GFX1250-TRUE16-NEXT: s_mov_b32 s10, -1 -; GFX1250-TRUE16-NEXT: s_mov_b32 s11, 0x31016000 -; GFX1250-TRUE16-NEXT: s_mov_b32 s14, s10 -; GFX1250-TRUE16-NEXT: s_mov_b32 s15, s11 -; GFX1250-TRUE16-NEXT: s_mov_b32 s18, s10 -; GFX1250-TRUE16-NEXT: s_mov_b32 s19, s11 -; GFX1250-TRUE16-NEXT: s_mov_b32 s22, s10 -; GFX1250-TRUE16-NEXT: s_mov_b32 s23, s11 +; GFX1250-TRUE16-NEXT: s_load_b256 s[8:15], s[4:5], 0x24 +; GFX1250-TRUE16-NEXT: s_mov_b32 s2, -1 +; GFX1250-TRUE16-NEXT: s_mov_b32 s3, 0x31016000 +; GFX1250-TRUE16-NEXT: s_mov_b32 s6, s2 +; GFX1250-TRUE16-NEXT: s_mov_b32 s7, s3 +; GFX1250-TRUE16-NEXT: s_mov_b32 s18, s2 +; GFX1250-TRUE16-NEXT: s_mov_b32 s19, s3 +; GFX1250-TRUE16-NEXT: s_mov_b32 s22, s2 +; GFX1250-TRUE16-NEXT: s_mov_b32 s23, s3 ; GFX1250-TRUE16-NEXT: s_wait_kmcnt 0x0 -; GFX1250-TRUE16-NEXT: s_mov_b32 s12, s2 -; GFX1250-TRUE16-NEXT: s_mov_b32 s13, s3 -; GFX1250-TRUE16-NEXT: s_mov_b32 s16, s4 -; GFX1250-TRUE16-NEXT: s_mov_b32 s17, s5 -; GFX1250-TRUE16-NEXT: s_mov_b32 s20, s6 -; GFX1250-TRUE16-NEXT: s_mov_b32 s21, s7 -; GFX1250-TRUE16-NEXT: buffer_load_u16 v1, off, s[12:15], null scope:SCOPE_SYS +; GFX1250-TRUE16-NEXT: s_mov_b32 s4, s10 +; GFX1250-TRUE16-NEXT: s_mov_b32 s5, s11 +; GFX1250-TRUE16-NEXT: s_mov_b32 s16, s12 +; GFX1250-TRUE16-NEXT: s_mov_b32 s17, s13 +; GFX1250-TRUE16-NEXT: s_mov_b32 s20, s14 +; GFX1250-TRUE16-NEXT: s_mov_b32 s21, s15 +; GFX1250-TRUE16-NEXT: buffer_load_u16 v1, off, s[4:7], null scope:SCOPE_SYS ; GFX1250-TRUE16-NEXT: s_wait_loadcnt 0x0 ; GFX1250-TRUE16-NEXT: buffer_load_u16 v2, off, s[16:19], null scope:SCOPE_SYS ; GFX1250-TRUE16-NEXT: s_wait_loadcnt 0x0 ; GFX1250-TRUE16-NEXT: buffer_load_u16 v0, off, 
s[20:23], null scope:SCOPE_SYS ; GFX1250-TRUE16-NEXT: s_wait_loadcnt 0x0 -; GFX1250-TRUE16-NEXT: s_mov_b32 s8, s0 -; GFX1250-TRUE16-NEXT: s_mov_b32 s9, s1 +; GFX1250-TRUE16-NEXT: s_mov_b32 s0, s8 +; GFX1250-TRUE16-NEXT: s_mov_b32 s1, s9 ; GFX1250-TRUE16-NEXT: v_min3_num_f16 v0.l, v0.l, v1.l, v2.l -; GFX1250-TRUE16-NEXT: buffer_store_b16 v0, off, s[8:11], null +; GFX1250-TRUE16-NEXT: buffer_store_b16 v0, off, s[0:3], null ; GFX1250-TRUE16-NEXT: s_endpgm ; ; GFX1250-FAKE16-LABEL: test_fmin3_olt_1_f16: ; GFX1250-FAKE16: ; %bb.0: -; GFX1250-FAKE16-NEXT: s_load_b256 s[0:7], s[4:5], 0x24 -; GFX1250-FAKE16-NEXT: s_mov_b32 s10, -1 -; GFX1250-FAKE16-NEXT: s_mov_b32 s11, 0x31016000 -; GFX1250-FAKE16-NEXT: s_mov_b32 s14, s10 -; GFX1250-FAKE16-NEXT: s_mov_b32 s15, s11 -; GFX1250-FAKE16-NEXT: s_mov_b32 s18, s10 -; GFX1250-FAKE16-NEXT: s_mov_b32 s19, s11 -; GFX1250-FAKE16-NEXT: s_mov_b32 s22, s10 -; GFX1250-FAKE16-NEXT: s_mov_b32 s23, s11 +; GFX1250-FAKE16-NEXT: s_load_b256 s[8:15], s[4:5], 0x24 +; GFX1250-FAKE16-NEXT: s_mov_b32 s2, -1 +; GFX1250-FAKE16-NEXT: s_mov_b32 s3, 0x31016000 +; GFX1250-FAKE16-NEXT: s_mov_b32 s6, s2 +; GFX1250-FAKE16-NEXT: s_mov_b32 s7, s3 +; GFX1250-FAKE16-NEXT: s_mov_b32 s18, s2 +; GFX1250-FAKE16-NEXT: s_mov_b32 s19, s3 +; GFX1250-FAKE16-NEXT: s_mov_b32 s22, s2 +; GFX1250-FAKE16-NEXT: s_mov_b32 s23, s3 ; GFX1250-FAKE16-NEXT: s_wait_kmcnt 0x0 -; GFX1250-FAKE16-NEXT: s_mov_b32 s12, s2 -; GFX1250-FAKE16-NEXT: s_mov_b32 s13, s3 -; GFX1250-FAKE16-NEXT: s_mov_b32 s16, s4 -; GFX1250-FAKE16-NEXT: s_mov_b32 s17, s5 -; GFX1250-FAKE16-NEXT: s_mov_b32 s20, s6 -; GFX1250-FAKE16-NEXT: s_mov_b32 s21, s7 -; GFX1250-FAKE16-NEXT: buffer_load_u16 v0, off, s[12:15], null scope:SCOPE_SYS +; GFX1250-FAKE16-NEXT: s_mov_b32 s4, s10 +; GFX1250-FAKE16-NEXT: s_mov_b32 s5, s11 +; GFX1250-FAKE16-NEXT: s_mov_b32 s16, s12 +; GFX1250-FAKE16-NEXT: s_mov_b32 s17, s13 +; GFX1250-FAKE16-NEXT: s_mov_b32 s20, s14 +; GFX1250-FAKE16-NEXT: s_mov_b32 s21, s15 +; GFX1250-FAKE16-NEXT: buffer_load_u16 v0, off, s[4:7], null scope:SCOPE_SYS ; GFX1250-FAKE16-NEXT: s_wait_loadcnt 0x0 ; GFX1250-FAKE16-NEXT: buffer_load_u16 v1, off, s[16:19], null scope:SCOPE_SYS ; GFX1250-FAKE16-NEXT: s_wait_loadcnt 0x0 ; GFX1250-FAKE16-NEXT: buffer_load_u16 v2, off, s[20:23], null scope:SCOPE_SYS ; GFX1250-FAKE16-NEXT: s_wait_loadcnt 0x0 -; GFX1250-FAKE16-NEXT: s_mov_b32 s8, s0 -; GFX1250-FAKE16-NEXT: s_mov_b32 s9, s1 +; GFX1250-FAKE16-NEXT: s_mov_b32 s0, s8 +; GFX1250-FAKE16-NEXT: s_mov_b32 s1, s9 ; GFX1250-FAKE16-NEXT: v_min3_num_f16 v0, v2, v0, v1 -; GFX1250-FAKE16-NEXT: buffer_store_b16 v0, off, s[8:11], null +; GFX1250-FAKE16-NEXT: buffer_store_b16 v0, off, s[0:3], null ; GFX1250-FAKE16-NEXT: s_endpgm %a = load volatile half, ptr addrspace(1) %aptr, align 2 %b = load volatile half, ptr addrspace(1) %bptr, align 2 @@ -1217,36 +1217,36 @@ define amdgpu_kernel void @test_fmin3_olt_0_f64(ptr addrspace(1) %out, ptr addrs ; ; GFX1250-LABEL: test_fmin3_olt_0_f64: ; GFX1250: ; %bb.0: -; GFX1250-NEXT: s_load_b256 s[0:7], s[4:5], 0x24 -; GFX1250-NEXT: s_mov_b32 s10, -1 -; GFX1250-NEXT: s_mov_b32 s11, 0x31016000 -; GFX1250-NEXT: s_mov_b32 s14, s10 -; GFX1250-NEXT: s_mov_b32 s15, s11 -; GFX1250-NEXT: s_mov_b32 s18, s10 -; GFX1250-NEXT: s_mov_b32 s19, s11 +; GFX1250-NEXT: s_load_b256 s[8:15], s[4:5], 0x24 +; GFX1250-NEXT: s_mov_b32 s2, -1 +; GFX1250-NEXT: s_mov_b32 s3, 0x31016000 +; GFX1250-NEXT: s_mov_b32 s6, s2 +; GFX1250-NEXT: s_mov_b32 s7, s3 +; GFX1250-NEXT: s_mov_b32 s18, s2 +; GFX1250-NEXT: s_mov_b32 s19, s3 ; GFX1250-NEXT: s_wait_kmcnt 0x0 
-; GFX1250-NEXT: s_mov_b32 s12, s2 -; GFX1250-NEXT: s_mov_b32 s13, s3 -; GFX1250-NEXT: s_mov_b32 s16, s4 -; GFX1250-NEXT: s_mov_b32 s17, s5 -; GFX1250-NEXT: buffer_load_b64 v[0:1], off, s[12:15], null scope:SCOPE_SYS +; GFX1250-NEXT: s_mov_b32 s4, s10 +; GFX1250-NEXT: s_mov_b32 s5, s11 +; GFX1250-NEXT: s_mov_b32 s16, s12 +; GFX1250-NEXT: s_mov_b32 s17, s13 +; GFX1250-NEXT: buffer_load_b64 v[0:1], off, s[4:7], null scope:SCOPE_SYS ; GFX1250-NEXT: s_wait_loadcnt 0x0 ; GFX1250-NEXT: buffer_load_b64 v[2:3], off, s[16:19], null scope:SCOPE_SYS ; GFX1250-NEXT: s_wait_loadcnt 0x0 ; GFX1250-NEXT: s_wait_xcnt 0x1 -; GFX1250-NEXT: s_mov_b32 s12, s6 -; GFX1250-NEXT: s_mov_b32 s13, s7 -; GFX1250-NEXT: s_mov_b32 s8, s0 -; GFX1250-NEXT: buffer_load_b64 v[4:5], off, s[12:15], null scope:SCOPE_SYS +; GFX1250-NEXT: s_mov_b32 s4, s14 +; GFX1250-NEXT: s_mov_b32 s5, s15 +; GFX1250-NEXT: s_mov_b32 s0, s8 +; GFX1250-NEXT: buffer_load_b64 v[4:5], off, s[4:7], null scope:SCOPE_SYS ; GFX1250-NEXT: s_wait_loadcnt 0x0 -; GFX1250-NEXT: s_mov_b32 s9, s1 +; GFX1250-NEXT: s_mov_b32 s1, s9 ; GFX1250-NEXT: v_max_num_f64_e32 v[0:1], v[0:1], v[0:1] ; GFX1250-NEXT: v_max_num_f64_e32 v[2:3], v[2:3], v[2:3] ; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) ; GFX1250-NEXT: v_min_num_f64_e32 v[0:1], v[0:1], v[2:3] ; GFX1250-NEXT: v_max_num_f64_e32 v[2:3], v[4:5], v[4:5] ; GFX1250-NEXT: v_min_num_f64_e32 v[0:1], v[0:1], v[2:3] -; GFX1250-NEXT: buffer_store_b64 v[0:1], off, s[8:11], null +; GFX1250-NEXT: buffer_store_b64 v[0:1], off, s[0:3], null ; GFX1250-NEXT: s_endpgm %a = load volatile double, ptr addrspace(1) %aptr, align 4 %b = load volatile double, ptr addrspace(1) %bptr, align 4 @@ -1427,36 +1427,36 @@ define amdgpu_kernel void @test_fmin3_olt_1_f64(ptr addrspace(1) %out, ptr addrs ; ; GFX1250-LABEL: test_fmin3_olt_1_f64: ; GFX1250: ; %bb.0: -; GFX1250-NEXT: s_load_b256 s[0:7], s[4:5], 0x24 -; GFX1250-NEXT: s_mov_b32 s10, -1 -; GFX1250-NEXT: s_mov_b32 s11, 0x31016000 -; GFX1250-NEXT: s_mov_b32 s14, s10 -; GFX1250-NEXT: s_mov_b32 s15, s11 -; GFX1250-NEXT: s_mov_b32 s18, s10 -; GFX1250-NEXT: s_mov_b32 s19, s11 +; GFX1250-NEXT: s_load_b256 s[8:15], s[4:5], 0x24 +; GFX1250-NEXT: s_mov_b32 s2, -1 +; GFX1250-NEXT: s_mov_b32 s3, 0x31016000 +; GFX1250-NEXT: s_mov_b32 s6, s2 +; GFX1250-NEXT: s_mov_b32 s7, s3 +; GFX1250-NEXT: s_mov_b32 s18, s2 +; GFX1250-NEXT: s_mov_b32 s19, s3 ; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: s_mov_b32 s12, s2 -; GFX1250-NEXT: s_mov_b32 s13, s3 -; GFX1250-NEXT: s_mov_b32 s16, s4 -; GFX1250-NEXT: s_mov_b32 s17, s5 -; GFX1250-NEXT: buffer_load_b64 v[0:1], off, s[12:15], null scope:SCOPE_SYS +; GFX1250-NEXT: s_mov_b32 s4, s10 +; GFX1250-NEXT: s_mov_b32 s5, s11 +; GFX1250-NEXT: s_mov_b32 s16, s12 +; GFX1250-NEXT: s_mov_b32 s17, s13 +; GFX1250-NEXT: buffer_load_b64 v[0:1], off, s[4:7], null scope:SCOPE_SYS ; GFX1250-NEXT: s_wait_loadcnt 0x0 ; GFX1250-NEXT: buffer_load_b64 v[2:3], off, s[16:19], null scope:SCOPE_SYS ; GFX1250-NEXT: s_wait_loadcnt 0x0 ; GFX1250-NEXT: s_wait_xcnt 0x1 -; GFX1250-NEXT: s_mov_b32 s12, s6 -; GFX1250-NEXT: s_mov_b32 s13, s7 -; GFX1250-NEXT: s_mov_b32 s8, s0 -; GFX1250-NEXT: buffer_load_b64 v[4:5], off, s[12:15], null scope:SCOPE_SYS +; GFX1250-NEXT: s_mov_b32 s4, s14 +; GFX1250-NEXT: s_mov_b32 s5, s15 +; GFX1250-NEXT: s_mov_b32 s0, s8 +; GFX1250-NEXT: buffer_load_b64 v[4:5], off, s[4:7], null scope:SCOPE_SYS ; GFX1250-NEXT: s_wait_loadcnt 0x0 -; GFX1250-NEXT: s_mov_b32 s9, s1 +; GFX1250-NEXT: s_mov_b32 s1, s9 ; GFX1250-NEXT: 
v_max_num_f64_e32 v[0:1], v[0:1], v[0:1] ; GFX1250-NEXT: v_max_num_f64_e32 v[2:3], v[2:3], v[2:3] ; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) ; GFX1250-NEXT: v_min_num_f64_e32 v[0:1], v[0:1], v[2:3] ; GFX1250-NEXT: v_max_num_f64_e32 v[2:3], v[4:5], v[4:5] ; GFX1250-NEXT: v_min_num_f64_e32 v[0:1], v[2:3], v[0:1] -; GFX1250-NEXT: buffer_store_b64 v[0:1], off, s[8:11], null +; GFX1250-NEXT: buffer_store_b64 v[0:1], off, s[0:3], null ; GFX1250-NEXT: s_endpgm %a = load volatile double, ptr addrspace(1) %aptr, align 4 %b = load volatile double, ptr addrspace(1) %bptr, align 4 diff --git a/llvm/test/CodeGen/AMDGPU/global-load-xcnt.ll b/llvm/test/CodeGen/AMDGPU/global-load-xcnt.ll index e532dea..f807169 100644 --- a/llvm/test/CodeGen/AMDGPU/global-load-xcnt.ll +++ b/llvm/test/CodeGen/AMDGPU/global-load-xcnt.ll @@ -11,22 +11,20 @@ define void @test_i8load_v4i8store(ptr addrspace(1) %ptr_a, ptr addrspace(1) %pt ; GCN-SDAG: ; %bb.0: ; GCN-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0 ; GCN-SDAG-NEXT: s_wait_kmcnt 0x0 -; GCN-SDAG-NEXT: global_load_u8 v2, v[2:3], off -; GCN-SDAG-NEXT: global_load_u8 v3, v[4:5], off -; GCN-SDAG-NEXT: global_load_u8 v0, v[0:1], off +; GCN-SDAG-NEXT: global_load_u8 v6, v[2:3], off +; GCN-SDAG-NEXT: global_load_u8 v7, v[4:5], off +; GCN-SDAG-NEXT: global_load_u8 v10, v[0:1], off ; GCN-SDAG-NEXT: s_wait_loadcnt 0x2 ; GCN-SDAG-NEXT: s_wait_xcnt 0x0 -; GCN-SDAG-NEXT: v_lshlrev_b16 v1, 8, v2 +; GCN-SDAG-NEXT: v_lshlrev_b16 v0, 8, v6 ; GCN-SDAG-NEXT: s_wait_loadcnt 0x1 -; GCN-SDAG-NEXT: v_lshlrev_b16 v2, 8, v3 +; GCN-SDAG-NEXT: v_lshlrev_b16 v1, 8, v7 +; GCN-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) +; GCN-SDAG-NEXT: v_or_b32_e32 v1, v7, v1 ; GCN-SDAG-NEXT: s_wait_loadcnt 0x0 -; GCN-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) -; GCN-SDAG-NEXT: v_or_b32_e32 v0, v0, v1 -; GCN-SDAG-NEXT: v_or_b32_e32 v1, v3, v2 -; GCN-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3) -; GCN-SDAG-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; GCN-SDAG-NEXT: v_dual_lshlrev_b32 v1, 16, v1 :: v_dual_bitop2_b32 v0, v10, v0 bitop3:0x54 +; GCN-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GCN-SDAG-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; GCN-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GCN-SDAG-NEXT: v_or_b32_e32 v0, v0, v1 ; GCN-SDAG-NEXT: global_store_b32 v[8:9], v0, off ; GCN-SDAG-NEXT: s_set_pc_i64 s[30:31] @@ -35,13 +33,15 @@ define void @test_i8load_v4i8store(ptr addrspace(1) %ptr_a, ptr addrspace(1) %pt ; GCN-GISEL: ; %bb.0: ; GCN-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0 ; GCN-GISEL-NEXT: s_wait_kmcnt 0x0 -; GCN-GISEL-NEXT: global_load_u8 v0, v[0:1], off -; GCN-GISEL-NEXT: global_load_u8 v1, v[2:3], off -; GCN-GISEL-NEXT: global_load_u8 v2, v[4:5], off +; GCN-GISEL-NEXT: global_load_u8 v6, v[0:1], off +; GCN-GISEL-NEXT: global_load_u8 v7, v[2:3], off +; GCN-GISEL-NEXT: global_load_u8 v10, v[4:5], off ; GCN-GISEL-NEXT: s_wait_loadcnt 0x1 -; GCN-GISEL-NEXT: v_lshl_or_b32 v0, v1, 8, v0 +; GCN-GISEL-NEXT: s_wait_xcnt 0x2 +; GCN-GISEL-NEXT: v_lshl_or_b32 v0, v7, 8, v6 ; GCN-GISEL-NEXT: s_wait_loadcnt 0x0 -; GCN-GISEL-NEXT: v_dual_lshlrev_b32 v1, 16, v2 :: v_dual_lshlrev_b32 v2, 24, v2 +; GCN-GISEL-NEXT: s_wait_xcnt 0x1 +; GCN-GISEL-NEXT: v_dual_lshlrev_b32 v1, 16, v10 :: v_dual_lshlrev_b32 v2, 24, v10 ; GCN-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GCN-GISEL-NEXT: v_or3_b32 v0, v0, v1, v2 ; GCN-GISEL-NEXT: global_store_b32 v[8:9], 
v0, off @@ -64,21 +64,21 @@ define i16 @test_v7i16_load_store(ptr addrspace(1) %ptr1, ptr addrspace(1) %ptr2 ; GCN-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0 ; GCN-SDAG-NEXT: s_wait_kmcnt 0x0 ; GCN-SDAG-NEXT: global_load_b128 v[4:7], v[0:1], off -; GCN-SDAG-NEXT: global_load_b128 v[0:3], v[2:3], off -; GCN-SDAG-NEXT: v_mov_b64_e32 v[8:9], 0 -; GCN-SDAG-NEXT: s_wait_loadcnt 0x0 -; GCN-SDAG-NEXT: v_pk_add_u16 v10, v6, v2 -; GCN-SDAG-NEXT: v_pk_add_u16 v11, v7, v3 +; GCN-SDAG-NEXT: global_load_b128 v[8:11], v[2:3], off ; GCN-SDAG-NEXT: s_wait_xcnt 0x0 ; GCN-SDAG-NEXT: v_mov_b64_e32 v[2:3], 12 +; GCN-SDAG-NEXT: s_wait_loadcnt 0x0 +; GCN-SDAG-NEXT: v_pk_add_u16 v1, v6, v10 +; GCN-SDAG-NEXT: v_pk_add_u16 v12, v7, v11 ; GCN-SDAG-NEXT: v_mov_b64_e32 v[6:7], 8 -; GCN-SDAG-NEXT: v_pk_add_u16 v4, v4, v0 -; GCN-SDAG-NEXT: v_lshrrev_b32_e32 v0, 16, v10 -; GCN-SDAG-NEXT: v_pk_add_u16 v5, v5, v1 +; GCN-SDAG-NEXT: v_mov_b64_e32 v[10:11], 0 +; GCN-SDAG-NEXT: v_pk_add_u16 v5, v5, v9 +; GCN-SDAG-NEXT: v_lshrrev_b32_e32 v0, 16, v1 +; GCN-SDAG-NEXT: v_pk_add_u16 v4, v4, v8 ; GCN-SDAG-NEXT: s_clause 0x2 -; GCN-SDAG-NEXT: global_store_b16 v[2:3], v11, off -; GCN-SDAG-NEXT: global_store_b32 v[6:7], v10, off -; GCN-SDAG-NEXT: global_store_b64 v[8:9], v[4:5], off +; GCN-SDAG-NEXT: global_store_b16 v[2:3], v12, off +; GCN-SDAG-NEXT: global_store_b32 v[6:7], v1, off +; GCN-SDAG-NEXT: global_store_b64 v[10:11], v[4:5], off ; GCN-SDAG-NEXT: s_set_pc_i64 s[30:31] ; ; GCN-GISEL-LABEL: test_v7i16_load_store: @@ -86,28 +86,29 @@ define i16 @test_v7i16_load_store(ptr addrspace(1) %ptr1, ptr addrspace(1) %ptr2 ; GCN-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0 ; GCN-GISEL-NEXT: s_wait_kmcnt 0x0 ; GCN-GISEL-NEXT: global_load_b128 v[4:7], v[0:1], off -; GCN-GISEL-NEXT: global_load_b128 v[0:3], v[2:3], off -; GCN-GISEL-NEXT: v_mov_b64_e32 v[8:9], 0 -; GCN-GISEL-NEXT: v_mov_b64_e32 v[10:11], 2 -; GCN-GISEL-NEXT: v_mov_b64_e32 v[12:13], 4 -; GCN-GISEL-NEXT: v_mov_b64_e32 v[14:15], 6 -; GCN-GISEL-NEXT: v_mov_b64_e32 v[16:17], 8 -; GCN-GISEL-NEXT: v_mov_b64_e32 v[18:19], 10 -; GCN-GISEL-NEXT: v_mov_b64_e32 v[20:21], 12 +; GCN-GISEL-NEXT: global_load_b128 v[8:11], v[2:3], off +; GCN-GISEL-NEXT: s_wait_xcnt 0x0 +; GCN-GISEL-NEXT: v_mov_b64_e32 v[2:3], 0 +; GCN-GISEL-NEXT: v_mov_b64_e32 v[12:13], 2 +; GCN-GISEL-NEXT: v_mov_b64_e32 v[14:15], 4 +; GCN-GISEL-NEXT: v_mov_b64_e32 v[16:17], 6 +; GCN-GISEL-NEXT: v_mov_b64_e32 v[18:19], 8 +; GCN-GISEL-NEXT: v_mov_b64_e32 v[20:21], 10 +; GCN-GISEL-NEXT: v_mov_b64_e32 v[22:23], 12 ; GCN-GISEL-NEXT: s_wait_loadcnt 0x0 -; GCN-GISEL-NEXT: v_pk_add_u16 v2, v6, v2 -; GCN-GISEL-NEXT: v_pk_add_u16 v4, v4, v0 -; GCN-GISEL-NEXT: v_pk_add_u16 v1, v5, v1 -; GCN-GISEL-NEXT: v_pk_add_u16 v3, v7, v3 +; GCN-GISEL-NEXT: v_pk_add_u16 v1, v6, v10 +; GCN-GISEL-NEXT: v_pk_add_u16 v4, v4, v8 +; GCN-GISEL-NEXT: v_pk_add_u16 v5, v5, v9 +; GCN-GISEL-NEXT: v_pk_add_u16 v6, v7, v11 ; GCN-GISEL-NEXT: s_clause 0x6 -; GCN-GISEL-NEXT: global_store_b16 v[8:9], v4, off -; GCN-GISEL-NEXT: global_store_d16_hi_b16 v[10:11], v4, off -; GCN-GISEL-NEXT: global_store_b16 v[12:13], v1, off -; GCN-GISEL-NEXT: global_store_d16_hi_b16 v[14:15], v1, off -; GCN-GISEL-NEXT: global_store_b16 v[16:17], v2, off -; GCN-GISEL-NEXT: global_store_d16_hi_b16 v[18:19], v2, off -; GCN-GISEL-NEXT: global_store_b16 v[20:21], v3, off -; GCN-GISEL-NEXT: v_lshrrev_b32_e32 v0, 16, v2 +; GCN-GISEL-NEXT: global_store_b16 v[2:3], v4, off +; GCN-GISEL-NEXT: global_store_d16_hi_b16 v[12:13], v4, off +; GCN-GISEL-NEXT: global_store_b16 v[14:15], v5, off +; GCN-GISEL-NEXT: 
global_store_d16_hi_b16 v[16:17], v5, off +; GCN-GISEL-NEXT: global_store_b16 v[18:19], v1, off +; GCN-GISEL-NEXT: global_store_d16_hi_b16 v[20:21], v1, off +; GCN-GISEL-NEXT: global_store_b16 v[22:23], v6, off +; GCN-GISEL-NEXT: v_lshrrev_b32_e32 v0, 16, v1 ; GCN-GISEL-NEXT: s_set_pc_i64 s[30:31] %vec1 = load <7 x i16>, ptr addrspace(1) %ptr1 %insert = insertelement <7 x i16> %vec1, i16 20, i32 4 @@ -253,8 +254,8 @@ define i64 @test_v16i64_load_store(ptr addrspace(1) %ptr_a, ptr addrspace(1) %pt ; GCN-SDAG-NEXT: global_load_b128 v[22:25], v[0:1], off offset:32 ; GCN-SDAG-NEXT: global_load_b128 v[26:29], v[0:1], off offset:16 ; GCN-SDAG-NEXT: global_load_b128 v[30:33], v[0:1], off -; GCN-SDAG-NEXT: global_load_b128 v[0:3], v[0:1], off offset:64 -; GCN-SDAG-NEXT: v_mov_b64_e32 v[36:37], 0x70 +; GCN-SDAG-NEXT: global_load_b128 v[34:37], v[0:1], off offset:64 +; GCN-SDAG-NEXT: v_mov_b64_e32 v[2:3], 0x70 ; GCN-SDAG-NEXT: v_mov_b64_e32 v[48:49], 48 ; GCN-SDAG-NEXT: v_mov_b64_e32 v[38:39], 0x60 ; GCN-SDAG-NEXT: v_mov_b64_e32 v[50:51], 32 @@ -262,14 +263,15 @@ define i64 @test_v16i64_load_store(ptr addrspace(1) %ptr_a, ptr addrspace(1) %pt ; GCN-SDAG-NEXT: v_mov_b64_e32 v[66:67], 0 ; GCN-SDAG-NEXT: v_mov_b64_e32 v[52:53], 0x50 ; GCN-SDAG-NEXT: v_mov_b64_e32 v[54:55], 64 -; GCN-SDAG-NEXT: v_dual_mov_b32 v34, 0xc8 :: v_dual_mov_b32 v35, 0 +; GCN-SDAG-NEXT: s_wait_xcnt 0x0 +; GCN-SDAG-NEXT: v_dual_mov_b32 v0, 0xc8 :: v_dual_mov_b32 v1, 0 ; GCN-SDAG-NEXT: s_wait_loadcnt 0x7 -; GCN-SDAG-NEXT: global_store_b128 v[36:37], v[6:9], off +; GCN-SDAG-NEXT: global_store_b128 v[2:3], v[6:9], off ; GCN-SDAG-NEXT: s_wait_loadcnt 0x6 ; GCN-SDAG-NEXT: global_store_b128 v[38:39], v[10:13], off ; GCN-SDAG-NEXT: s_wait_loadcnt 0x5 ; GCN-SDAG-NEXT: s_wait_xcnt 0x1 -; GCN-SDAG-NEXT: v_dual_mov_b32 v36, v16 :: v_dual_mov_b32 v37, v17 +; GCN-SDAG-NEXT: v_dual_mov_b32 v2, v16 :: v_dual_mov_b32 v3, v17 ; GCN-SDAG-NEXT: s_wait_xcnt 0x0 ; GCN-SDAG-NEXT: v_add_nc_u64_e32 v[12:13], v[12:13], v[12:13] ; GCN-SDAG-NEXT: v_add_nc_u64_e32 v[10:11], v[10:11], v[10:11] @@ -286,8 +288,8 @@ define i64 @test_v16i64_load_store(ptr addrspace(1) %ptr_a, ptr addrspace(1) %pt ; GCN-SDAG-NEXT: v_add_nc_u64_e32 v[8:9], v[8:9], v[8:9] ; GCN-SDAG-NEXT: v_add_nc_u64_e32 v[6:7], v[6:7], v[6:7] ; GCN-SDAG-NEXT: s_wait_loadcnt 0x0 -; GCN-SDAG-NEXT: v_add_nc_u64_e32 v[50:51], v[2:3], v[2:3] -; GCN-SDAG-NEXT: v_add_nc_u64_e32 v[48:49], v[0:1], v[0:1] +; GCN-SDAG-NEXT: v_add_nc_u64_e32 v[50:51], v[36:37], v[36:37] +; GCN-SDAG-NEXT: v_add_nc_u64_e32 v[48:49], v[34:35], v[34:35] ; GCN-SDAG-NEXT: v_add_nc_u64_e32 v[16:17], v[16:17], v[16:17] ; GCN-SDAG-NEXT: v_add_nc_u64_e32 v[14:15], 0xc8, v[14:15] ; GCN-SDAG-NEXT: v_add_nc_u64_e32 v[24:25], 0x64, v[24:25] @@ -298,8 +300,8 @@ define i64 @test_v16i64_load_store(ptr addrspace(1) %ptr_a, ptr addrspace(1) %pt ; GCN-SDAG-NEXT: v_add_nc_u64_e32 v[20:21], v[20:21], v[20:21] ; GCN-SDAG-NEXT: v_add_nc_u64_e32 v[18:19], v[18:19], v[18:19] ; GCN-SDAG-NEXT: s_clause 0x1 -; GCN-SDAG-NEXT: global_store_b128 v[52:53], v[34:37], off -; GCN-SDAG-NEXT: global_store_b128 v[54:55], v[0:3], off +; GCN-SDAG-NEXT: global_store_b128 v[52:53], v[0:3], off +; GCN-SDAG-NEXT: global_store_b128 v[54:55], v[34:37], off ; GCN-SDAG-NEXT: s_clause 0x7 ; GCN-SDAG-NEXT: global_store_b128 v[4:5], v[10:13], off offset:96 ; GCN-SDAG-NEXT: global_store_b128 v[4:5], v[6:9], off offset:112 @@ -309,7 +311,7 @@ define i64 @test_v16i64_load_store(ptr addrspace(1) %ptr_a, ptr addrspace(1) %pt ; GCN-SDAG-NEXT: global_store_b128 v[4:5], v[18:21], off 
offset:48 ; GCN-SDAG-NEXT: global_store_b128 v[4:5], v[30:33], off ; GCN-SDAG-NEXT: global_store_b128 v[4:5], v[26:29], off offset:16 -; GCN-SDAG-NEXT: s_wait_xcnt 0x8 +; GCN-SDAG-NEXT: s_wait_xcnt 0x9 ; GCN-SDAG-NEXT: v_dual_mov_b32 v0, v32 :: v_dual_mov_b32 v1, v33 ; GCN-SDAG-NEXT: s_set_pc_i64 s[30:31] ; @@ -325,7 +327,7 @@ define i64 @test_v16i64_load_store(ptr addrspace(1) %ptr_a, ptr addrspace(1) %pt ; GCN-GISEL-NEXT: global_load_b128 v[22:25], v[0:1], off offset:48 ; GCN-GISEL-NEXT: global_load_b128 v[26:29], v[0:1], off offset:96 ; GCN-GISEL-NEXT: global_load_b128 v[30:33], v[0:1], off offset:112 -; GCN-GISEL-NEXT: global_load_b128 v[0:3], v[0:1], off offset:64 +; GCN-GISEL-NEXT: global_load_b128 v[34:37], v[0:1], off offset:64 ; GCN-GISEL-NEXT: v_mov_b64_e32 v[38:39], 0 ; GCN-GISEL-NEXT: v_mov_b64_e32 v[48:49], 16 ; GCN-GISEL-NEXT: v_mov_b64_e32 v[50:51], 32 @@ -333,7 +335,8 @@ define i64 @test_v16i64_load_store(ptr addrspace(1) %ptr_a, ptr addrspace(1) %pt ; GCN-GISEL-NEXT: v_mov_b64_e32 v[66:67], 0x60 ; GCN-GISEL-NEXT: v_mov_b64_e32 v[68:69], 0x70 ; GCN-GISEL-NEXT: v_mov_b64_e32 v[54:55], 64 -; GCN-GISEL-NEXT: v_mov_b64_e32 v[34:35], 0xc8 +; GCN-GISEL-NEXT: s_wait_xcnt 0x0 +; GCN-GISEL-NEXT: v_mov_b64_e32 v[0:1], 0xc8 ; GCN-GISEL-NEXT: v_mov_b64_e32 v[64:65], 0x50 ; GCN-GISEL-NEXT: s_wait_loadcnt 0x6 ; GCN-GISEL-NEXT: global_store_b128 v[38:39], v[10:13], off @@ -349,7 +352,7 @@ define i64 @test_v16i64_load_store(ptr addrspace(1) %ptr_a, ptr addrspace(1) %pt ; GCN-GISEL-NEXT: global_store_b128 v[68:69], v[30:33], off ; GCN-GISEL-NEXT: s_wait_xcnt 0x5 ; GCN-GISEL-NEXT: v_add_nc_u64_e32 v[12:13], v[12:13], v[12:13] -; GCN-GISEL-NEXT: v_mov_b64_e32 v[36:37], v[8:9] +; GCN-GISEL-NEXT: v_mov_b64_e32 v[2:3], v[8:9] ; GCN-GISEL-NEXT: v_add_nc_u64_e32 v[10:11], v[10:11], v[10:11] ; GCN-GISEL-NEXT: s_wait_xcnt 0x4 ; GCN-GISEL-NEXT: v_add_nc_u64_e32 v[14:15], v[14:15], v[14:15] @@ -361,8 +364,8 @@ define i64 @test_v16i64_load_store(ptr addrspace(1) %ptr_a, ptr addrspace(1) %pt ; GCN-GISEL-NEXT: v_add_nc_u64_e32 v[22:23], v[22:23], v[22:23] ; GCN-GISEL-NEXT: v_add_nc_u64_e32 v[24:25], v[24:25], v[24:25] ; GCN-GISEL-NEXT: s_wait_loadcnt 0x0 -; GCN-GISEL-NEXT: v_add_nc_u64_e32 v[48:49], v[0:1], v[0:1] -; GCN-GISEL-NEXT: v_add_nc_u64_e32 v[50:51], v[2:3], v[2:3] +; GCN-GISEL-NEXT: v_add_nc_u64_e32 v[48:49], v[34:35], v[34:35] +; GCN-GISEL-NEXT: v_add_nc_u64_e32 v[50:51], v[36:37], v[36:37] ; GCN-GISEL-NEXT: v_add_nc_u64_e32 v[6:7], 0xc8, v[6:7] ; GCN-GISEL-NEXT: v_add_nc_u64_e32 v[8:9], v[8:9], v[8:9] ; GCN-GISEL-NEXT: s_wait_xcnt 0x1 @@ -372,8 +375,8 @@ define i64 @test_v16i64_load_store(ptr addrspace(1) %ptr_a, ptr addrspace(1) %pt ; GCN-GISEL-NEXT: v_add_nc_u64_e32 v[30:31], v[30:31], v[30:31] ; GCN-GISEL-NEXT: v_add_nc_u64_e32 v[32:33], v[32:33], v[32:33] ; GCN-GISEL-NEXT: s_clause 0x1 -; GCN-GISEL-NEXT: global_store_b128 v[54:55], v[0:3], off -; GCN-GISEL-NEXT: global_store_b128 v[64:65], v[34:37], off +; GCN-GISEL-NEXT: global_store_b128 v[54:55], v[34:37], off +; GCN-GISEL-NEXT: global_store_b128 v[64:65], v[0:3], off ; GCN-GISEL-NEXT: s_clause 0x7 ; GCN-GISEL-NEXT: global_store_b128 v[4:5], v[10:13], off ; GCN-GISEL-NEXT: global_store_b128 v[4:5], v[14:17], off offset:16 @@ -383,7 +386,7 @@ define i64 @test_v16i64_load_store(ptr addrspace(1) %ptr_a, ptr addrspace(1) %pt ; GCN-GISEL-NEXT: global_store_b128 v[4:5], v[6:9], off offset:80 ; GCN-GISEL-NEXT: global_store_b128 v[4:5], v[26:29], off offset:96 ; GCN-GISEL-NEXT: global_store_b128 v[4:5], v[30:33], off offset:112 -; 
GCN-GISEL-NEXT: s_wait_xcnt 0x9 +; GCN-GISEL-NEXT: s_wait_xcnt 0x8 ; GCN-GISEL-NEXT: v_dual_mov_b32 v0, v12 :: v_dual_mov_b32 v1, v13 ; GCN-GISEL-NEXT: s_set_pc_i64 s[30:31] %a = load <16 x i64>, ptr addrspace(1) %ptr_a, align 4 @@ -402,16 +405,17 @@ define amdgpu_kernel void @test_v7i16_load_store_kernel(ptr addrspace(1) %ptr1, ; GCN-SDAG-LABEL: test_v7i16_load_store_kernel: ; GCN-SDAG: ; %bb.0: ; GCN-SDAG-NEXT: s_load_b128 s[0:3], s[4:5], 0x0 -; GCN-SDAG-NEXT: v_and_b32_e32 v4, 0x3ff, v0 +; GCN-SDAG-NEXT: v_and_b32_e32 v8, 0x3ff, v0 ; GCN-SDAG-NEXT: s_wait_xcnt 0x0 ; GCN-SDAG-NEXT: s_load_b64 s[4:5], s[4:5], 0x10 -; GCN-SDAG-NEXT: v_mov_b64_e32 v[8:9], 12 ; GCN-SDAG-NEXT: v_mov_b64_e32 v[10:11], 8 ; GCN-SDAG-NEXT: v_mov_b64_e32 v[12:13], 0 ; GCN-SDAG-NEXT: s_wait_kmcnt 0x0 ; GCN-SDAG-NEXT: s_clause 0x1 -; GCN-SDAG-NEXT: global_load_b128 v[0:3], v4, s[0:1] scale_offset -; GCN-SDAG-NEXT: global_load_b128 v[4:7], v4, s[2:3] scale_offset +; GCN-SDAG-NEXT: global_load_b128 v[0:3], v8, s[0:1] scale_offset +; GCN-SDAG-NEXT: global_load_b128 v[4:7], v8, s[2:3] scale_offset +; GCN-SDAG-NEXT: s_wait_xcnt 0x0 +; GCN-SDAG-NEXT: v_mov_b64_e32 v[8:9], 12 ; GCN-SDAG-NEXT: s_wait_loadcnt 0x0 ; GCN-SDAG-NEXT: v_pk_add_u16 v3, v3, v7 ; GCN-SDAG-NEXT: v_pk_add_u16 v2, v2, v6 @@ -428,10 +432,9 @@ define amdgpu_kernel void @test_v7i16_load_store_kernel(ptr addrspace(1) %ptr1, ; GCN-GISEL-LABEL: test_v7i16_load_store_kernel: ; GCN-GISEL: ; %bb.0: ; GCN-GISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x0 -; GCN-GISEL-NEXT: v_and_b32_e32 v4, 0x3ff, v0 +; GCN-GISEL-NEXT: v_and_b32_e32 v8, 0x3ff, v0 ; GCN-GISEL-NEXT: s_wait_xcnt 0x0 ; GCN-GISEL-NEXT: s_load_b64 s[4:5], s[4:5], 0x10 -; GCN-GISEL-NEXT: v_mov_b64_e32 v[8:9], 0 ; GCN-GISEL-NEXT: v_mov_b64_e32 v[10:11], 2 ; GCN-GISEL-NEXT: v_mov_b64_e32 v[12:13], 4 ; GCN-GISEL-NEXT: v_mov_b64_e32 v[14:15], 6 @@ -440,8 +443,10 @@ define amdgpu_kernel void @test_v7i16_load_store_kernel(ptr addrspace(1) %ptr1, ; GCN-GISEL-NEXT: v_mov_b64_e32 v[20:21], 12 ; GCN-GISEL-NEXT: s_wait_kmcnt 0x0 ; GCN-GISEL-NEXT: s_clause 0x1 -; GCN-GISEL-NEXT: global_load_b128 v[0:3], v4, s[0:1] scale_offset -; GCN-GISEL-NEXT: global_load_b128 v[4:7], v4, s[2:3] scale_offset +; GCN-GISEL-NEXT: global_load_b128 v[0:3], v8, s[0:1] scale_offset +; GCN-GISEL-NEXT: global_load_b128 v[4:7], v8, s[2:3] scale_offset +; GCN-GISEL-NEXT: s_wait_xcnt 0x0 +; GCN-GISEL-NEXT: v_mov_b64_e32 v[8:9], 0 ; GCN-GISEL-NEXT: s_wait_loadcnt 0x0 ; GCN-GISEL-NEXT: v_pk_add_u16 v0, v0, v4 ; GCN-GISEL-NEXT: v_pk_add_u16 v1, v1, v5 diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cluster.id.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cluster.id.ll index 90fcb51..fa97380 100644 --- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cluster.id.ll +++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cluster.id.ll @@ -11,14 +11,11 @@ declare i32 @llvm.amdgcn.cluster.id.z() #0 define amdgpu_kernel void @test_cluster_id_x(ptr addrspace(1) %out) { ; CHECK-UNKNOWN-LABEL: test_cluster_id_x: ; CHECK-UNKNOWN: ; %bb.0: -; CHECK-UNKNOWN-NEXT: s_load_b64 s[0:1], s[0:1], 0x24 +; CHECK-UNKNOWN-NEXT: s_load_b64 s[2:3], s[0:1], 0x24 ; CHECK-UNKNOWN-NEXT: v_dual_mov_b32 v0, ttmp9 :: v_dual_mov_b32 v1, 0 ; CHECK-UNKNOWN-NEXT: s_wait_kmcnt 0x0 -; CHECK-UNKNOWN-NEXT: global_store_b32 v1, v0, s[0:1] +; CHECK-UNKNOWN-NEXT: global_store_b32 v1, v0, s[2:3] ; CHECK-UNKNOWN-NEXT: s_endpgm -; CHECK-UNKNOWN: COMPUTE_PGM_RSRC2:TGID_X_EN: 1 -; CHECK-UNKNOWN: COMPUTE_PGM_RSRC2:TGID_Y_EN: 0 -; CHECK-UNKNOWN: COMPUTE_PGM_RSRC2:TGID_Z_EN: 0 ; ; CHECK-MESA3D-LABEL: test_cluster_id_x: ; 
CHECK-MESA3D: .amd_kernel_code_t @@ -68,7 +65,7 @@ define amdgpu_kernel void @test_cluster_id_x(ptr addrspace(1) %out) { ; CHECK-MESA3D-NEXT: is_ptr64 = 1 ; CHECK-MESA3D-NEXT: is_dynamic_callstack = 0 ; CHECK-MESA3D-NEXT: is_debug_enabled = 0 -; CHECK-MESA3D-NEXT: is_xnack_enabled = 0 +; CHECK-MESA3D-NEXT: is_xnack_enabled = 1 ; CHECK-MESA3D-NEXT: workitem_private_segment_byte_size = 0 ; CHECK-MESA3D-NEXT: workgroup_group_segment_byte_size = 0 ; CHECK-MESA3D-NEXT: gds_segment_byte_size = 0 @@ -98,14 +95,11 @@ define amdgpu_kernel void @test_cluster_id_x(ptr addrspace(1) %out) { ; ; CHECK-G-UNKNOWN-LABEL: test_cluster_id_x: ; CHECK-G-UNKNOWN: ; %bb.0: -; CHECK-G-UNKNOWN-NEXT: s_load_b64 s[0:1], s[0:1], 0x24 +; CHECK-G-UNKNOWN-NEXT: s_load_b64 s[2:3], s[0:1], 0x24 ; CHECK-G-UNKNOWN-NEXT: v_dual_mov_b32 v0, ttmp9 :: v_dual_mov_b32 v1, 0 ; CHECK-G-UNKNOWN-NEXT: s_wait_kmcnt 0x0 -; CHECK-G-UNKNOWN-NEXT: global_store_b32 v1, v0, s[0:1] +; CHECK-G-UNKNOWN-NEXT: global_store_b32 v1, v0, s[2:3] ; CHECK-G-UNKNOWN-NEXT: s_endpgm -; CHECK-G-UNKNOWN: COMPUTE_PGM_RSRC2:TGID_X_EN: 1 -; CHECK-G-UNKNOWN: COMPUTE_PGM_RSRC2:TGID_Y_EN: 0 -; CHECK-G-UNKNOWN: COMPUTE_PGM_RSRC2:TGID_Z_EN: 0 ; ; CHECK-G-MESA3D-LABEL: test_cluster_id_x: ; CHECK-G-MESA3D: .amd_kernel_code_t @@ -155,7 +149,7 @@ define amdgpu_kernel void @test_cluster_id_x(ptr addrspace(1) %out) { ; CHECK-G-MESA3D-NEXT: is_ptr64 = 1 ; CHECK-G-MESA3D-NEXT: is_dynamic_callstack = 0 ; CHECK-G-MESA3D-NEXT: is_debug_enabled = 0 -; CHECK-G-MESA3D-NEXT: is_xnack_enabled = 0 +; CHECK-G-MESA3D-NEXT: is_xnack_enabled = 1 ; CHECK-G-MESA3D-NEXT: workitem_private_segment_byte_size = 0 ; CHECK-G-MESA3D-NEXT: workgroup_group_segment_byte_size = 0 ; CHECK-G-MESA3D-NEXT: gds_segment_byte_size = 0 @@ -190,14 +184,11 @@ define amdgpu_kernel void @test_cluster_id_x(ptr addrspace(1) %out) { define amdgpu_kernel void @test_cluster_id_y(ptr addrspace(1) %out) #1 { ; CHECK-UNKNOWN-LABEL: test_cluster_id_y: ; CHECK-UNKNOWN: ; %bb.0: -; CHECK-UNKNOWN-NEXT: s_load_b64 s[0:1], s[0:1], 0x24 +; CHECK-UNKNOWN-NEXT: s_load_b64 s[2:3], s[0:1], 0x24 ; CHECK-UNKNOWN-NEXT: v_dual_mov_b32 v0, ttmp7 :: v_dual_mov_b32 v1, 0 ; CHECK-UNKNOWN-NEXT: s_wait_kmcnt 0x0 -; CHECK-UNKNOWN-NEXT: global_store_b32 v1, v0, s[0:1] +; CHECK-UNKNOWN-NEXT: global_store_b32 v1, v0, s[2:3] ; CHECK-UNKNOWN-NEXT: s_endpgm -; CHECK-UNKNOWN: COMPUTE_PGM_RSRC2:TGID_X_EN: 1 -; CHECK-UNKNOWN: COMPUTE_PGM_RSRC2:TGID_Y_EN: 1 -; CHECK-UNKNOWN: COMPUTE_PGM_RSRC2:TGID_Z_EN: 0 ; ; CHECK-MESA3D-LABEL: test_cluster_id_y: ; CHECK-MESA3D: .amd_kernel_code_t @@ -247,7 +238,7 @@ define amdgpu_kernel void @test_cluster_id_y(ptr addrspace(1) %out) #1 { ; CHECK-MESA3D-NEXT: is_ptr64 = 1 ; CHECK-MESA3D-NEXT: is_dynamic_callstack = 0 ; CHECK-MESA3D-NEXT: is_debug_enabled = 0 -; CHECK-MESA3D-NEXT: is_xnack_enabled = 0 +; CHECK-MESA3D-NEXT: is_xnack_enabled = 1 ; CHECK-MESA3D-NEXT: workitem_private_segment_byte_size = 0 ; CHECK-MESA3D-NEXT: workgroup_group_segment_byte_size = 0 ; CHECK-MESA3D-NEXT: gds_segment_byte_size = 0 @@ -277,14 +268,11 @@ define amdgpu_kernel void @test_cluster_id_y(ptr addrspace(1) %out) #1 { ; ; CHECK-G-UNKNOWN-LABEL: test_cluster_id_y: ; CHECK-G-UNKNOWN: ; %bb.0: -; CHECK-G-UNKNOWN-NEXT: s_load_b64 s[0:1], s[0:1], 0x24 +; CHECK-G-UNKNOWN-NEXT: s_load_b64 s[2:3], s[0:1], 0x24 ; CHECK-G-UNKNOWN-NEXT: v_dual_mov_b32 v0, ttmp7 :: v_dual_mov_b32 v1, 0 ; CHECK-G-UNKNOWN-NEXT: s_wait_kmcnt 0x0 -; CHECK-G-UNKNOWN-NEXT: global_store_b32 v1, v0, s[0:1] +; CHECK-G-UNKNOWN-NEXT: global_store_b32 v1, v0, s[2:3] ; 
CHECK-G-UNKNOWN-NEXT: s_endpgm -; CHECK-G-UNKNOWN: COMPUTE_PGM_RSRC2:TGID_X_EN: 1 -; CHECK-G-UNKNOWN: COMPUTE_PGM_RSRC2:TGID_Y_EN: 1 -; CHECK-G-UNKNOWN: COMPUTE_PGM_RSRC2:TGID_Z_EN: 0 ; ; CHECK-G-MESA3D-LABEL: test_cluster_id_y: ; CHECK-G-MESA3D: .amd_kernel_code_t @@ -334,7 +322,7 @@ define amdgpu_kernel void @test_cluster_id_y(ptr addrspace(1) %out) #1 { ; CHECK-G-MESA3D-NEXT: is_ptr64 = 1 ; CHECK-G-MESA3D-NEXT: is_dynamic_callstack = 0 ; CHECK-G-MESA3D-NEXT: is_debug_enabled = 0 -; CHECK-G-MESA3D-NEXT: is_xnack_enabled = 0 +; CHECK-G-MESA3D-NEXT: is_xnack_enabled = 1 ; CHECK-G-MESA3D-NEXT: workitem_private_segment_byte_size = 0 ; CHECK-G-MESA3D-NEXT: workgroup_group_segment_byte_size = 0 ; CHECK-G-MESA3D-NEXT: gds_segment_byte_size = 0 @@ -369,16 +357,14 @@ define amdgpu_kernel void @test_cluster_id_y(ptr addrspace(1) %out) #1 { define amdgpu_kernel void @test_cluster_id_z(ptr addrspace(1) %out) #1 { ; CHECK-UNKNOWN-LABEL: test_cluster_id_z: ; CHECK-UNKNOWN: ; %bb.0: -; CHECK-UNKNOWN-NEXT: s_load_b64 s[0:1], s[0:1], 0x24 -; CHECK-UNKNOWN-NEXT: s_lshr_b32 s2, ttmp7, 16 +; CHECK-UNKNOWN-NEXT: s_load_b64 s[2:3], s[0:1], 0x24 +; CHECK-UNKNOWN-NEXT: s_wait_xcnt 0x0 +; CHECK-UNKNOWN-NEXT: s_lshr_b32 s0, ttmp7, 16 ; CHECK-UNKNOWN-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; CHECK-UNKNOWN-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2 +; CHECK-UNKNOWN-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s0 ; CHECK-UNKNOWN-NEXT: s_wait_kmcnt 0x0 -; CHECK-UNKNOWN-NEXT: global_store_b32 v0, v1, s[0:1] +; CHECK-UNKNOWN-NEXT: global_store_b32 v0, v1, s[2:3] ; CHECK-UNKNOWN-NEXT: s_endpgm -; CHECK-UNKNOWN: COMPUTE_PGM_RSRC2:TGID_X_EN: 1 -; CHECK-UNKNOWN: COMPUTE_PGM_RSRC2:TGID_Y_EN: 0 -; CHECK-UNKNOWN: COMPUTE_PGM_RSRC2:TGID_Z_EN: 1 ; ; CHECK-MESA3D-LABEL: test_cluster_id_z: ; CHECK-MESA3D: .amd_kernel_code_t @@ -428,7 +414,7 @@ define amdgpu_kernel void @test_cluster_id_z(ptr addrspace(1) %out) #1 { ; CHECK-MESA3D-NEXT: is_ptr64 = 1 ; CHECK-MESA3D-NEXT: is_dynamic_callstack = 0 ; CHECK-MESA3D-NEXT: is_debug_enabled = 0 -; CHECK-MESA3D-NEXT: is_xnack_enabled = 0 +; CHECK-MESA3D-NEXT: is_xnack_enabled = 1 ; CHECK-MESA3D-NEXT: workitem_private_segment_byte_size = 0 ; CHECK-MESA3D-NEXT: workgroup_group_segment_byte_size = 0 ; CHECK-MESA3D-NEXT: gds_segment_byte_size = 0 @@ -460,16 +446,14 @@ define amdgpu_kernel void @test_cluster_id_z(ptr addrspace(1) %out) #1 { ; ; CHECK-G-UNKNOWN-LABEL: test_cluster_id_z: ; CHECK-G-UNKNOWN: ; %bb.0: -; CHECK-G-UNKNOWN-NEXT: s_load_b64 s[0:1], s[0:1], 0x24 -; CHECK-G-UNKNOWN-NEXT: s_lshr_b32 s2, ttmp7, 16 +; CHECK-G-UNKNOWN-NEXT: s_load_b64 s[2:3], s[0:1], 0x24 +; CHECK-G-UNKNOWN-NEXT: s_wait_xcnt 0x0 +; CHECK-G-UNKNOWN-NEXT: s_lshr_b32 s0, ttmp7, 16 ; CHECK-G-UNKNOWN-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; CHECK-G-UNKNOWN-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v0, s2 +; CHECK-G-UNKNOWN-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v0, s0 ; CHECK-G-UNKNOWN-NEXT: s_wait_kmcnt 0x0 -; CHECK-G-UNKNOWN-NEXT: global_store_b32 v1, v0, s[0:1] +; CHECK-G-UNKNOWN-NEXT: global_store_b32 v1, v0, s[2:3] ; CHECK-G-UNKNOWN-NEXT: s_endpgm -; CHECK-G-UNKNOWN: COMPUTE_PGM_RSRC2:TGID_X_EN: 1 -; CHECK-G-UNKNOWN: COMPUTE_PGM_RSRC2:TGID_Y_EN: 0 -; CHECK-G-UNKNOWN: COMPUTE_PGM_RSRC2:TGID_Z_EN: 1 ; ; CHECK-G-MESA3D-LABEL: test_cluster_id_z: ; CHECK-G-MESA3D: .amd_kernel_code_t @@ -519,7 +503,7 @@ define amdgpu_kernel void @test_cluster_id_z(ptr addrspace(1) %out) #1 { ; CHECK-G-MESA3D-NEXT: is_ptr64 = 1 ; CHECK-G-MESA3D-NEXT: is_dynamic_callstack = 0 ; CHECK-G-MESA3D-NEXT: 
is_debug_enabled = 0 -; CHECK-G-MESA3D-NEXT: is_xnack_enabled = 0 +; CHECK-G-MESA3D-NEXT: is_xnack_enabled = 1 ; CHECK-G-MESA3D-NEXT: workitem_private_segment_byte_size = 0 ; CHECK-G-MESA3D-NEXT: workgroup_group_segment_byte_size = 0 ; CHECK-G-MESA3D-NEXT: gds_segment_byte_size = 0 diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cluster.workgroup.id.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cluster.workgroup.id.ll index aa3b7b3..3ef84a3 100644 --- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cluster.workgroup.id.ll +++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cluster.workgroup.id.ll @@ -67,7 +67,7 @@ define amdgpu_kernel void @test_workgroup_id_x(ptr addrspace(1) %out) #1 { ; CHECK-MESA3D-NEXT: is_ptr64 = 1 ; CHECK-MESA3D-NEXT: is_dynamic_callstack = 0 ; CHECK-MESA3D-NEXT: is_debug_enabled = 0 -; CHECK-MESA3D-NEXT: is_xnack_enabled = 0 +; CHECK-MESA3D-NEXT: is_xnack_enabled = 1 ; CHECK-MESA3D-NEXT: workitem_private_segment_byte_size = 0 ; CHECK-MESA3D-NEXT: workgroup_group_segment_byte_size = 0 ; CHECK-MESA3D-NEXT: gds_segment_byte_size = 0 @@ -155,7 +155,7 @@ define amdgpu_kernel void @test_workgroup_id_x(ptr addrspace(1) %out) #1 { ; CHECK-G-MESA3D-NEXT: is_ptr64 = 1 ; CHECK-G-MESA3D-NEXT: is_dynamic_callstack = 0 ; CHECK-G-MESA3D-NEXT: is_debug_enabled = 0 -; CHECK-G-MESA3D-NEXT: is_xnack_enabled = 0 +; CHECK-G-MESA3D-NEXT: is_xnack_enabled = 1 ; CHECK-G-MESA3D-NEXT: workitem_private_segment_byte_size = 0 ; CHECK-G-MESA3D-NEXT: workgroup_group_segment_byte_size = 0 ; CHECK-G-MESA3D-NEXT: gds_segment_byte_size = 0 @@ -246,7 +246,7 @@ define amdgpu_kernel void @test_workgroup_id_x_optimized(ptr addrspace(1) %out) ; CHECK-MESA3D-NEXT: is_ptr64 = 1 ; CHECK-MESA3D-NEXT: is_dynamic_callstack = 0 ; CHECK-MESA3D-NEXT: is_debug_enabled = 0 -; CHECK-MESA3D-NEXT: is_xnack_enabled = 0 +; CHECK-MESA3D-NEXT: is_xnack_enabled = 1 ; CHECK-MESA3D-NEXT: workitem_private_segment_byte_size = 0 ; CHECK-MESA3D-NEXT: workgroup_group_segment_byte_size = 0 ; CHECK-MESA3D-NEXT: gds_segment_byte_size = 0 @@ -330,7 +330,7 @@ define amdgpu_kernel void @test_workgroup_id_x_optimized(ptr addrspace(1) %out) ; CHECK-G-MESA3D-NEXT: is_ptr64 = 1 ; CHECK-G-MESA3D-NEXT: is_dynamic_callstack = 0 ; CHECK-G-MESA3D-NEXT: is_debug_enabled = 0 -; CHECK-G-MESA3D-NEXT: is_xnack_enabled = 0 +; CHECK-G-MESA3D-NEXT: is_xnack_enabled = 1 ; CHECK-G-MESA3D-NEXT: workitem_private_segment_byte_size = 0 ; CHECK-G-MESA3D-NEXT: workgroup_group_segment_byte_size = 0 ; CHECK-G-MESA3D-NEXT: gds_segment_byte_size = 0 @@ -421,7 +421,7 @@ define amdgpu_kernel void @test_workgroup_id_y(ptr addrspace(1) %out) #1 { ; CHECK-MESA3D-NEXT: is_ptr64 = 1 ; CHECK-MESA3D-NEXT: is_dynamic_callstack = 0 ; CHECK-MESA3D-NEXT: is_debug_enabled = 0 -; CHECK-MESA3D-NEXT: is_xnack_enabled = 0 +; CHECK-MESA3D-NEXT: is_xnack_enabled = 1 ; CHECK-MESA3D-NEXT: workitem_private_segment_byte_size = 0 ; CHECK-MESA3D-NEXT: workgroup_group_segment_byte_size = 0 ; CHECK-MESA3D-NEXT: gds_segment_byte_size = 0 @@ -509,7 +509,7 @@ define amdgpu_kernel void @test_workgroup_id_y(ptr addrspace(1) %out) #1 { ; CHECK-G-MESA3D-NEXT: is_ptr64 = 1 ; CHECK-G-MESA3D-NEXT: is_dynamic_callstack = 0 ; CHECK-G-MESA3D-NEXT: is_debug_enabled = 0 -; CHECK-G-MESA3D-NEXT: is_xnack_enabled = 0 +; CHECK-G-MESA3D-NEXT: is_xnack_enabled = 1 ; CHECK-G-MESA3D-NEXT: workitem_private_segment_byte_size = 0 ; CHECK-G-MESA3D-NEXT: workgroup_group_segment_byte_size = 0 ; CHECK-G-MESA3D-NEXT: gds_segment_byte_size = 0 @@ -600,7 +600,7 @@ define amdgpu_kernel void @test_workgroup_id_y_optimized(ptr addrspace(1) %out) 
; CHECK-MESA3D-NEXT: is_ptr64 = 1 ; CHECK-MESA3D-NEXT: is_dynamic_callstack = 0 ; CHECK-MESA3D-NEXT: is_debug_enabled = 0 -; CHECK-MESA3D-NEXT: is_xnack_enabled = 0 +; CHECK-MESA3D-NEXT: is_xnack_enabled = 1 ; CHECK-MESA3D-NEXT: workitem_private_segment_byte_size = 0 ; CHECK-MESA3D-NEXT: workgroup_group_segment_byte_size = 0 ; CHECK-MESA3D-NEXT: gds_segment_byte_size = 0 @@ -684,7 +684,7 @@ define amdgpu_kernel void @test_workgroup_id_y_optimized(ptr addrspace(1) %out) ; CHECK-G-MESA3D-NEXT: is_ptr64 = 1 ; CHECK-G-MESA3D-NEXT: is_dynamic_callstack = 0 ; CHECK-G-MESA3D-NEXT: is_debug_enabled = 0 -; CHECK-G-MESA3D-NEXT: is_xnack_enabled = 0 +; CHECK-G-MESA3D-NEXT: is_xnack_enabled = 1 ; CHECK-G-MESA3D-NEXT: workitem_private_segment_byte_size = 0 ; CHECK-G-MESA3D-NEXT: workgroup_group_segment_byte_size = 0 ; CHECK-G-MESA3D-NEXT: gds_segment_byte_size = 0 @@ -775,7 +775,7 @@ define amdgpu_kernel void @test_workgroup_id_z(ptr addrspace(1) %out) #1 { ; CHECK-MESA3D-NEXT: is_ptr64 = 1 ; CHECK-MESA3D-NEXT: is_dynamic_callstack = 0 ; CHECK-MESA3D-NEXT: is_debug_enabled = 0 -; CHECK-MESA3D-NEXT: is_xnack_enabled = 0 +; CHECK-MESA3D-NEXT: is_xnack_enabled = 1 ; CHECK-MESA3D-NEXT: workitem_private_segment_byte_size = 0 ; CHECK-MESA3D-NEXT: workgroup_group_segment_byte_size = 0 ; CHECK-MESA3D-NEXT: gds_segment_byte_size = 0 @@ -863,7 +863,7 @@ define amdgpu_kernel void @test_workgroup_id_z(ptr addrspace(1) %out) #1 { ; CHECK-G-MESA3D-NEXT: is_ptr64 = 1 ; CHECK-G-MESA3D-NEXT: is_dynamic_callstack = 0 ; CHECK-G-MESA3D-NEXT: is_debug_enabled = 0 -; CHECK-G-MESA3D-NEXT: is_xnack_enabled = 0 +; CHECK-G-MESA3D-NEXT: is_xnack_enabled = 1 ; CHECK-G-MESA3D-NEXT: workitem_private_segment_byte_size = 0 ; CHECK-G-MESA3D-NEXT: workgroup_group_segment_byte_size = 0 ; CHECK-G-MESA3D-NEXT: gds_segment_byte_size = 0 @@ -956,7 +956,7 @@ define amdgpu_kernel void @test_workgroup_flat_id(ptr addrspace(1) %out) { ; CHECK-MESA3D-NEXT: is_ptr64 = 1 ; CHECK-MESA3D-NEXT: is_dynamic_callstack = 0 ; CHECK-MESA3D-NEXT: is_debug_enabled = 0 -; CHECK-MESA3D-NEXT: is_xnack_enabled = 0 +; CHECK-MESA3D-NEXT: is_xnack_enabled = 1 ; CHECK-MESA3D-NEXT: workitem_private_segment_byte_size = 0 ; CHECK-MESA3D-NEXT: workgroup_group_segment_byte_size = 0 ; CHECK-MESA3D-NEXT: gds_segment_byte_size = 0 @@ -1044,7 +1044,7 @@ define amdgpu_kernel void @test_workgroup_flat_id(ptr addrspace(1) %out) { ; CHECK-G-MESA3D-NEXT: is_ptr64 = 1 ; CHECK-G-MESA3D-NEXT: is_dynamic_callstack = 0 ; CHECK-G-MESA3D-NEXT: is_debug_enabled = 0 -; CHECK-G-MESA3D-NEXT: is_xnack_enabled = 0 +; CHECK-G-MESA3D-NEXT: is_xnack_enabled = 1 ; CHECK-G-MESA3D-NEXT: workitem_private_segment_byte_size = 0 ; CHECK-G-MESA3D-NEXT: workgroup_group_segment_byte_size = 0 ; CHECK-G-MESA3D-NEXT: gds_segment_byte_size = 0 @@ -1135,7 +1135,7 @@ define amdgpu_kernel void @test_workgroup_id_z_optimized(ptr addrspace(1) %out) ; CHECK-MESA3D-NEXT: is_ptr64 = 1 ; CHECK-MESA3D-NEXT: is_dynamic_callstack = 0 ; CHECK-MESA3D-NEXT: is_debug_enabled = 0 -; CHECK-MESA3D-NEXT: is_xnack_enabled = 0 +; CHECK-MESA3D-NEXT: is_xnack_enabled = 1 ; CHECK-MESA3D-NEXT: workitem_private_segment_byte_size = 0 ; CHECK-MESA3D-NEXT: workgroup_group_segment_byte_size = 0 ; CHECK-MESA3D-NEXT: gds_segment_byte_size = 0 @@ -1219,7 +1219,7 @@ define amdgpu_kernel void @test_workgroup_id_z_optimized(ptr addrspace(1) %out) ; CHECK-G-MESA3D-NEXT: is_ptr64 = 1 ; CHECK-G-MESA3D-NEXT: is_dynamic_callstack = 0 ; CHECK-G-MESA3D-NEXT: is_debug_enabled = 0 -; CHECK-G-MESA3D-NEXT: is_xnack_enabled = 0 +; CHECK-G-MESA3D-NEXT: 
is_xnack_enabled = 1 ; CHECK-G-MESA3D-NEXT: workitem_private_segment_byte_size = 0 ; CHECK-G-MESA3D-NEXT: workgroup_group_segment_byte_size = 0 ; CHECK-G-MESA3D-NEXT: gds_segment_byte_size = 0 diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cluster.workgroup.max.flat.id.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cluster.workgroup.max.flat.id.ll index afe37e3..b8ff9e5 100644 --- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cluster.workgroup.max.flat.id.ll +++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cluster.workgroup.max.flat.id.ll @@ -65,7 +65,7 @@ define amdgpu_kernel void @test_workgroup_max_flat_id(ptr addrspace(1) %out) #1 ; CHECK-MESA3D-NEXT: is_ptr64 = 1 ; CHECK-MESA3D-NEXT: is_dynamic_callstack = 0 ; CHECK-MESA3D-NEXT: is_debug_enabled = 0 -; CHECK-MESA3D-NEXT: is_xnack_enabled = 0 +; CHECK-MESA3D-NEXT: is_xnack_enabled = 1 ; CHECK-MESA3D-NEXT: workitem_private_segment_byte_size = 0 ; CHECK-MESA3D-NEXT: workgroup_group_segment_byte_size = 0 ; CHECK-MESA3D-NEXT: gds_segment_byte_size = 0 @@ -153,7 +153,7 @@ define amdgpu_kernel void @test_workgroup_max_flat_id(ptr addrspace(1) %out) #1 ; CHECK-G-MESA3D-NEXT: is_ptr64 = 1 ; CHECK-G-MESA3D-NEXT: is_dynamic_callstack = 0 ; CHECK-G-MESA3D-NEXT: is_debug_enabled = 0 -; CHECK-G-MESA3D-NEXT: is_xnack_enabled = 0 +; CHECK-G-MESA3D-NEXT: is_xnack_enabled = 1 ; CHECK-G-MESA3D-NEXT: workitem_private_segment_byte_size = 0 ; CHECK-G-MESA3D-NEXT: workgroup_group_segment_byte_size = 0 ; CHECK-G-MESA3D-NEXT: gds_segment_byte_size = 0 diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cluster.workgroup.max.id.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cluster.workgroup.max.id.ll index 7ea4fa5..9bca696 100644 --- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cluster.workgroup.max.id.ll +++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cluster.workgroup.max.id.ll @@ -67,7 +67,7 @@ define amdgpu_kernel void @test_workgroup_max_id_x(ptr addrspace(1) %out) #1 { ; CHECK-MESA3D-NEXT: is_ptr64 = 1 ; CHECK-MESA3D-NEXT: is_dynamic_callstack = 0 ; CHECK-MESA3D-NEXT: is_debug_enabled = 0 -; CHECK-MESA3D-NEXT: is_xnack_enabled = 0 +; CHECK-MESA3D-NEXT: is_xnack_enabled = 1 ; CHECK-MESA3D-NEXT: workitem_private_segment_byte_size = 0 ; CHECK-MESA3D-NEXT: workgroup_group_segment_byte_size = 0 ; CHECK-MESA3D-NEXT: gds_segment_byte_size = 0 @@ -155,7 +155,7 @@ define amdgpu_kernel void @test_workgroup_max_id_x(ptr addrspace(1) %out) #1 { ; CHECK-G-MESA3D-NEXT: is_ptr64 = 1 ; CHECK-G-MESA3D-NEXT: is_dynamic_callstack = 0 ; CHECK-G-MESA3D-NEXT: is_debug_enabled = 0 -; CHECK-G-MESA3D-NEXT: is_xnack_enabled = 0 +; CHECK-G-MESA3D-NEXT: is_xnack_enabled = 1 ; CHECK-G-MESA3D-NEXT: workitem_private_segment_byte_size = 0 ; CHECK-G-MESA3D-NEXT: workgroup_group_segment_byte_size = 0 ; CHECK-G-MESA3D-NEXT: gds_segment_byte_size = 0 @@ -246,7 +246,7 @@ define amdgpu_kernel void @test_workgroup_max_id_x_optimized(ptr addrspace(1) %o ; CHECK-MESA3D-NEXT: is_ptr64 = 1 ; CHECK-MESA3D-NEXT: is_dynamic_callstack = 0 ; CHECK-MESA3D-NEXT: is_debug_enabled = 0 -; CHECK-MESA3D-NEXT: is_xnack_enabled = 0 +; CHECK-MESA3D-NEXT: is_xnack_enabled = 1 ; CHECK-MESA3D-NEXT: workitem_private_segment_byte_size = 0 ; CHECK-MESA3D-NEXT: workgroup_group_segment_byte_size = 0 ; CHECK-MESA3D-NEXT: gds_segment_byte_size = 0 @@ -330,7 +330,7 @@ define amdgpu_kernel void @test_workgroup_max_id_x_optimized(ptr addrspace(1) %o ; CHECK-G-MESA3D-NEXT: is_ptr64 = 1 ; CHECK-G-MESA3D-NEXT: is_dynamic_callstack = 0 ; CHECK-G-MESA3D-NEXT: is_debug_enabled = 0 -; CHECK-G-MESA3D-NEXT: is_xnack_enabled = 0 +; CHECK-G-MESA3D-NEXT: 
is_xnack_enabled = 1 ; CHECK-G-MESA3D-NEXT: workitem_private_segment_byte_size = 0 ; CHECK-G-MESA3D-NEXT: workgroup_group_segment_byte_size = 0 ; CHECK-G-MESA3D-NEXT: gds_segment_byte_size = 0 @@ -421,7 +421,7 @@ define amdgpu_kernel void @test_workgroup_max_id_y(ptr addrspace(1) %out) #1 { ; CHECK-MESA3D-NEXT: is_ptr64 = 1 ; CHECK-MESA3D-NEXT: is_dynamic_callstack = 0 ; CHECK-MESA3D-NEXT: is_debug_enabled = 0 -; CHECK-MESA3D-NEXT: is_xnack_enabled = 0 +; CHECK-MESA3D-NEXT: is_xnack_enabled = 1 ; CHECK-MESA3D-NEXT: workitem_private_segment_byte_size = 0 ; CHECK-MESA3D-NEXT: workgroup_group_segment_byte_size = 0 ; CHECK-MESA3D-NEXT: gds_segment_byte_size = 0 @@ -509,7 +509,7 @@ define amdgpu_kernel void @test_workgroup_max_id_y(ptr addrspace(1) %out) #1 { ; CHECK-G-MESA3D-NEXT: is_ptr64 = 1 ; CHECK-G-MESA3D-NEXT: is_dynamic_callstack = 0 ; CHECK-G-MESA3D-NEXT: is_debug_enabled = 0 -; CHECK-G-MESA3D-NEXT: is_xnack_enabled = 0 +; CHECK-G-MESA3D-NEXT: is_xnack_enabled = 1 ; CHECK-G-MESA3D-NEXT: workitem_private_segment_byte_size = 0 ; CHECK-G-MESA3D-NEXT: workgroup_group_segment_byte_size = 0 ; CHECK-G-MESA3D-NEXT: gds_segment_byte_size = 0 @@ -600,7 +600,7 @@ define amdgpu_kernel void @test_workgroup_max_id_y_optimized(ptr addrspace(1) %o ; CHECK-MESA3D-NEXT: is_ptr64 = 1 ; CHECK-MESA3D-NEXT: is_dynamic_callstack = 0 ; CHECK-MESA3D-NEXT: is_debug_enabled = 0 -; CHECK-MESA3D-NEXT: is_xnack_enabled = 0 +; CHECK-MESA3D-NEXT: is_xnack_enabled = 1 ; CHECK-MESA3D-NEXT: workitem_private_segment_byte_size = 0 ; CHECK-MESA3D-NEXT: workgroup_group_segment_byte_size = 0 ; CHECK-MESA3D-NEXT: gds_segment_byte_size = 0 @@ -684,7 +684,7 @@ define amdgpu_kernel void @test_workgroup_max_id_y_optimized(ptr addrspace(1) %o ; CHECK-G-MESA3D-NEXT: is_ptr64 = 1 ; CHECK-G-MESA3D-NEXT: is_dynamic_callstack = 0 ; CHECK-G-MESA3D-NEXT: is_debug_enabled = 0 -; CHECK-G-MESA3D-NEXT: is_xnack_enabled = 0 +; CHECK-G-MESA3D-NEXT: is_xnack_enabled = 1 ; CHECK-G-MESA3D-NEXT: workitem_private_segment_byte_size = 0 ; CHECK-G-MESA3D-NEXT: workgroup_group_segment_byte_size = 0 ; CHECK-G-MESA3D-NEXT: gds_segment_byte_size = 0 @@ -775,7 +775,7 @@ define amdgpu_kernel void @test_workgroup_max_id_z(ptr addrspace(1) %out) #1 { ; CHECK-MESA3D-NEXT: is_ptr64 = 1 ; CHECK-MESA3D-NEXT: is_dynamic_callstack = 0 ; CHECK-MESA3D-NEXT: is_debug_enabled = 0 -; CHECK-MESA3D-NEXT: is_xnack_enabled = 0 +; CHECK-MESA3D-NEXT: is_xnack_enabled = 1 ; CHECK-MESA3D-NEXT: workitem_private_segment_byte_size = 0 ; CHECK-MESA3D-NEXT: workgroup_group_segment_byte_size = 0 ; CHECK-MESA3D-NEXT: gds_segment_byte_size = 0 @@ -863,7 +863,7 @@ define amdgpu_kernel void @test_workgroup_max_id_z(ptr addrspace(1) %out) #1 { ; CHECK-G-MESA3D-NEXT: is_ptr64 = 1 ; CHECK-G-MESA3D-NEXT: is_dynamic_callstack = 0 ; CHECK-G-MESA3D-NEXT: is_debug_enabled = 0 -; CHECK-G-MESA3D-NEXT: is_xnack_enabled = 0 +; CHECK-G-MESA3D-NEXT: is_xnack_enabled = 1 ; CHECK-G-MESA3D-NEXT: workitem_private_segment_byte_size = 0 ; CHECK-G-MESA3D-NEXT: workgroup_group_segment_byte_size = 0 ; CHECK-G-MESA3D-NEXT: gds_segment_byte_size = 0 @@ -954,7 +954,7 @@ define amdgpu_kernel void @test_workgroup_max_id_z_optimized(ptr addrspace(1) %o ; CHECK-MESA3D-NEXT: is_ptr64 = 1 ; CHECK-MESA3D-NEXT: is_dynamic_callstack = 0 ; CHECK-MESA3D-NEXT: is_debug_enabled = 0 -; CHECK-MESA3D-NEXT: is_xnack_enabled = 0 +; CHECK-MESA3D-NEXT: is_xnack_enabled = 1 ; CHECK-MESA3D-NEXT: workitem_private_segment_byte_size = 0 ; CHECK-MESA3D-NEXT: workgroup_group_segment_byte_size = 0 ; CHECK-MESA3D-NEXT: gds_segment_byte_size 
= 0 @@ -1038,7 +1038,7 @@ define amdgpu_kernel void @test_workgroup_max_id_z_optimized(ptr addrspace(1) %o ; CHECK-G-MESA3D-NEXT: is_ptr64 = 1 ; CHECK-G-MESA3D-NEXT: is_dynamic_callstack = 0 ; CHECK-G-MESA3D-NEXT: is_debug_enabled = 0 -; CHECK-G-MESA3D-NEXT: is_xnack_enabled = 0 +; CHECK-G-MESA3D-NEXT: is_xnack_enabled = 1 ; CHECK-G-MESA3D-NEXT: workitem_private_segment_byte_size = 0 ; CHECK-G-MESA3D-NEXT: workgroup_group_segment_byte_size = 0 ; CHECK-G-MESA3D-NEXT: gds_segment_byte_size = 0 diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.is.private.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.is.private.ll index 56215ca..67d0410 100644 --- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.is.private.ll +++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.is.private.ll @@ -59,21 +59,20 @@ define amdgpu_kernel void @is_private_vgpr(ptr addrspace(1) %ptr.ptr) { ; GFX9-NEXT: global_store_dword v[0:1], v0, off ; GFX9-NEXT: s_endpgm ; -; GFX1250-SDAG-LABEL: is_private_vgpr: -; GFX1250-SDAG: ; %bb.0: -; GFX1250-SDAG-NEXT: s_load_b64 s[0:1], s[4:5], 0x0 -; GFX1250-SDAG-NEXT: v_and_b32_e32 v0, 0x3ff, v0 -; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0 -; GFX1250-SDAG-NEXT: global_load_b64 v[0:1], v0, s[0:1] scale_offset scope:SCOPE_SYS -; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 -; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0 -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi -; GFX1250-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v1 -; GFX1250-SDAG-NEXT: v_cmp_gt_u32_e32 vcc_lo, 0x4000000, v0 -; GFX1250-SDAG-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo -; GFX1250-SDAG-NEXT: global_store_b32 v[0:1], v0, off -; GFX1250-SDAG-NEXT: s_endpgm +; GFX1250-LABEL: is_private_vgpr: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x0 +; GFX1250-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_load_b64 v[0:1], v0, s[0:1] scale_offset scope:SCOPE_SYS +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: s_wait_xcnt 0x0 +; GFX1250-NEXT: v_xor_b32_e32 v0, src_flat_scratch_base_hi, v1 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-NEXT: v_cmp_gt_u32_e32 vcc_lo, 0x4000000, v0 +; GFX1250-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo +; GFX1250-NEXT: global_store_b32 v[0:1], v0, off +; GFX1250-NEXT: s_endpgm ; ; CI-GISEL-LABEL: is_private_vgpr: ; CI-GISEL: ; %bb.0: @@ -122,22 +121,6 @@ define amdgpu_kernel void @is_private_vgpr(ptr addrspace(1) %ptr.ptr) { ; GFX11-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo ; GFX11-NEXT: global_store_b32 v[0:1], v0, off ; GFX11-NEXT: s_endpgm -; -; GFX1250-GISEL-LABEL: is_private_vgpr: -; GFX1250-GISEL: ; %bb.0: -; GFX1250-GISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x0 -; GFX1250-GISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0 -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v2, src_flat_scratch_base_hi -; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0 -; GFX1250-GISEL-NEXT: global_load_b64 v[0:1], v0, s[0:1] scale_offset scope:SCOPE_SYS -; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 -; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0 -; GFX1250-GISEL-NEXT: v_xor_b32_e32 v0, v1, v2 -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_cmp_gt_u32_e32 vcc_lo, 0x4000000, v0 -; GFX1250-GISEL-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo -; GFX1250-GISEL-NEXT: global_store_b32 v[0:1], v0, off -; GFX1250-GISEL-NEXT: s_endpgm %id = call i32 @llvm.amdgcn.workitem.id.x() %gep = getelementptr inbounds ptr, ptr addrspace(1) %ptr.ptr, i32 %id %ptr = load volatile ptr, ptr addrspace(1) %gep @@ -206,9 +189,8 @@ define 
amdgpu_kernel void @is_private_sgpr(ptr %ptr) { ; GFX1250-SDAG-LABEL: is_private_sgpr: ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: s_load_b32 s0, s[4:5], 0x4 -; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_hi ; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0 -; GFX1250-SDAG-NEXT: s_xor_b32 s0, s0, s1 +; GFX1250-SDAG-NEXT: s_xor_b32 s0, s0, src_flat_scratch_base_hi ; GFX1250-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) ; GFX1250-SDAG-NEXT: s_cmp_lt_u32 s0, 0x4000000 ; GFX1250-SDAG-NEXT: s_cselect_b32 s0, -1, 0 @@ -285,9 +267,8 @@ define amdgpu_kernel void @is_private_sgpr(ptr %ptr) { ; GFX1250-GISEL: ; %bb.0: ; GFX1250-GISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x0 ; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0 -; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi -; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) -; GFX1250-GISEL-NEXT: s_xor_b32 s0, s1, s0 +; GFX1250-GISEL-NEXT: s_xor_b32 s0, s1, src_flat_scratch_base_hi +; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1250-GISEL-NEXT: s_cmp_ge_u32 s0, 0x4000000 ; GFX1250-GISEL-NEXT: s_cbranch_scc1 .LBB1_2 ; GFX1250-GISEL-NEXT: ; %bb.1: ; %bb0 @@ -311,5 +292,4 @@ bb1: ; CI: {{.*}} ; GFX10-GISEL: {{.*}} ; GFX11-GISEL: {{.*}} -; GFX1250: {{.*}} ; SI-SDAG: {{.*}} diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.permlane.gfx1250.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.permlane.gfx1250.ll index 4f7bbf8..42a50bb 100644 --- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.permlane.gfx1250.ll +++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.permlane.gfx1250.ll @@ -5,13 +5,13 @@ define amdgpu_kernel void @v_permlane_bcast_b32_vss(ptr addrspace(1) %out, i32 %src0, i32 %src1, i32 %src2) { ; GFX1250-LABEL: v_permlane_bcast_b32_vss: ; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_clause 0x1 ; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; GFX1250-NEXT: s_wait_xcnt 0x0 -; GFX1250-NEXT: s_load_b32 s4, s[4:5], 0x34 +; GFX1250-NEXT: s_load_b32 s6, s[4:5], 0x34 ; GFX1250-NEXT: s_wait_kmcnt 0x0 ; GFX1250-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v0, s2 ; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-NEXT: v_permlane_bcast_b32 v0, v0, s3, s4 +; GFX1250-NEXT: v_permlane_bcast_b32 v0, v0, s3, s6 ; GFX1250-NEXT: global_store_b32 v1, v0, s[0:1] ; GFX1250-NEXT: s_endpgm %v = call i32 @llvm.amdgcn.permlane.bcast(i32 %src0, i32 %src1, i32 %src2) @@ -92,13 +92,13 @@ define amdgpu_kernel void @v_permlane_bcast_b32_vvv(ptr addrspace(1) %out, i32 % define amdgpu_kernel void @v_permlane_down_b32_vss(ptr addrspace(1) %out, i32 %src0, i32 %src1, i32 %src2) { ; GFX1250-LABEL: v_permlane_down_b32_vss: ; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_clause 0x1 ; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; GFX1250-NEXT: s_wait_xcnt 0x0 -; GFX1250-NEXT: s_load_b32 s4, s[4:5], 0x34 +; GFX1250-NEXT: s_load_b32 s6, s[4:5], 0x34 ; GFX1250-NEXT: s_wait_kmcnt 0x0 ; GFX1250-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v0, s2 ; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-NEXT: v_permlane_down_b32 v0, v0, s3, s4 +; GFX1250-NEXT: v_permlane_down_b32 v0, v0, s3, s6 ; GFX1250-NEXT: global_store_b32 v1, v0, s[0:1] ; GFX1250-NEXT: s_endpgm %v = call i32 @llvm.amdgcn.permlane.down(i32 %src0, i32 %src1, i32 %src2) @@ -179,13 +179,13 @@ define amdgpu_kernel void @v_permlane_down_b32_vvv(ptr addrspace(1) %out, i32 %s define amdgpu_kernel void @v_permlane_up_b32_vss(ptr addrspace(1) %out, i32 %src0, i32 %src1, i32 %src2) { ; GFX1250-LABEL: v_permlane_up_b32_vss: ; GFX1250: ; %bb.0: +; 
GFX1250-NEXT: s_clause 0x1 ; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; GFX1250-NEXT: s_wait_xcnt 0x0 -; GFX1250-NEXT: s_load_b32 s4, s[4:5], 0x34 +; GFX1250-NEXT: s_load_b32 s6, s[4:5], 0x34 ; GFX1250-NEXT: s_wait_kmcnt 0x0 ; GFX1250-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v0, s2 ; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-NEXT: v_permlane_up_b32 v0, v0, s3, s4 +; GFX1250-NEXT: v_permlane_up_b32 v0, v0, s3, s6 ; GFX1250-NEXT: global_store_b32 v1, v0, s[0:1] ; GFX1250-NEXT: s_endpgm %v = call i32 @llvm.amdgcn.permlane.up(i32 %src0, i32 %src1, i32 %src2) @@ -266,13 +266,13 @@ define amdgpu_kernel void @v_permlane_up_b32_vvv(ptr addrspace(1) %out, i32 %src define amdgpu_kernel void @v_permlane_xor_b32_vss(ptr addrspace(1) %out, i32 %src0, i32 %src1, i32 %src2) { ; GFX1250-LABEL: v_permlane_xor_b32_vss: ; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_clause 0x1 ; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; GFX1250-NEXT: s_wait_xcnt 0x0 -; GFX1250-NEXT: s_load_b32 s4, s[4:5], 0x34 +; GFX1250-NEXT: s_load_b32 s6, s[4:5], 0x34 ; GFX1250-NEXT: s_wait_kmcnt 0x0 ; GFX1250-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v0, s2 ; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-NEXT: v_permlane_xor_b32 v0, v0, s3, s4 +; GFX1250-NEXT: v_permlane_xor_b32 v0, v0, s3, s6 ; GFX1250-NEXT: global_store_b32 v1, v0, s[0:1] ; GFX1250-NEXT: s_endpgm %v = call i32 @llvm.amdgcn.permlane.xor(i32 %src0, i32 %src1, i32 %src2) diff --git a/llvm/test/CodeGen/AMDGPU/load-store-opt-scale-offset.mir b/llvm/test/CodeGen/AMDGPU/load-store-opt-scale-offset.mir index 76e2092..abcae69 100644 --- a/llvm/test/CodeGen/AMDGPU/load-store-opt-scale-offset.mir +++ b/llvm/test/CodeGen/AMDGPU/load-store-opt-scale-offset.mir @@ -69,9 +69,9 @@ body: | bb.0: ; GCN-LABEL: name: merge_s_load_x1_x1_imm_no_scale_offset ; GCN: [[DEF:%[0-9]+]]:sgpr_64 = IMPLICIT_DEF - ; GCN-NEXT: [[S_LOAD_DWORDX2_IMM:%[0-9]+]]:sreg_64_xexec = S_LOAD_DWORDX2_IMM [[DEF]], 0, 0 :: (dereferenceable invariant load (s64), align 4) - ; GCN-NEXT: [[COPY:%[0-9]+]]:sreg_32_xm0_xexec = COPY [[S_LOAD_DWORDX2_IMM]].sub0 - ; GCN-NEXT: [[COPY1:%[0-9]+]]:sreg_32_xm0_xexec = COPY killed [[S_LOAD_DWORDX2_IMM]].sub1 + ; GCN-NEXT: early-clobber %3:sreg_64_xexec = S_LOAD_DWORDX2_IMM_ec [[DEF]], 0, 0 :: (dereferenceable invariant load (s64), align 4) + ; GCN-NEXT: [[COPY:%[0-9]+]]:sreg_32_xm0_xexec = COPY %3.sub0 + ; GCN-NEXT: [[COPY1:%[0-9]+]]:sreg_32_xm0_xexec = COPY killed %3.sub1 %0:sgpr_64 = IMPLICIT_DEF %1:sreg_32_xm0_xexec = S_LOAD_DWORD_IMM %0:sgpr_64, 0, 0 :: (dereferenceable invariant load (s32)) %2:sreg_32_xm0_xexec = S_LOAD_DWORD_IMM %0:sgpr_64, 4, 0 :: (dereferenceable invariant load (s32)) diff --git a/llvm/test/CodeGen/AMDGPU/loop-prefetch-data.ll b/llvm/test/CodeGen/AMDGPU/loop-prefetch-data.ll index 1e6b77e..4ad161c 100644 --- a/llvm/test/CodeGen/AMDGPU/loop-prefetch-data.ll +++ b/llvm/test/CodeGen/AMDGPU/loop-prefetch-data.ll @@ -471,13 +471,13 @@ define amdgpu_kernel void @copy_flat_divergent(ptr nocapture %d, ptr nocapture r ; GFX1250-NEXT: s_cmp_eq_u32 s0, 0 ; GFX1250-NEXT: s_cbranch_scc1 .LBB4_3 ; GFX1250-NEXT: ; %bb.1: ; %for.body.preheader -; GFX1250-NEXT: s_load_b128 s[4:7], s[4:5], 0x24 +; GFX1250-NEXT: s_load_b128 s[8:11], s[4:5], 0x24 ; GFX1250-NEXT: v_and_b32_e32 v0, 0x3ff, v0 ; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) ; GFX1250-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_lshlrev_b32 v0, 4, v0 ; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: v_add_nc_u64_e32 v[2:3], s[6:7], v[0:1] -; 
GFX1250-NEXT: v_add_nc_u64_e32 v[0:1], s[4:5], v[0:1] +; GFX1250-NEXT: v_add_nc_u64_e32 v[2:3], s[10:11], v[0:1] +; GFX1250-NEXT: v_add_nc_u64_e32 v[0:1], s[8:9], v[0:1] ; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_2) ; GFX1250-NEXT: v_add_nc_u64_e32 v[2:3], 0xb0, v[2:3] ; GFX1250-NEXT: .LBB4_2: ; %for.body @@ -602,13 +602,13 @@ define amdgpu_kernel void @copy_global_divergent(ptr addrspace(1) nocapture %d, ; GFX1250-NEXT: s_cmp_eq_u32 s0, 0 ; GFX1250-NEXT: s_cbranch_scc1 .LBB5_3 ; GFX1250-NEXT: ; %bb.1: ; %for.body.preheader -; GFX1250-NEXT: s_load_b128 s[4:7], s[4:5], 0x24 +; GFX1250-NEXT: s_load_b128 s[8:11], s[4:5], 0x24 ; GFX1250-NEXT: v_and_b32_e32 v0, 0x3ff, v0 ; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) ; GFX1250-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_lshlrev_b32 v0, 4, v0 ; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: v_add_nc_u64_e32 v[2:3], s[6:7], v[0:1] -; GFX1250-NEXT: v_add_nc_u64_e32 v[0:1], s[4:5], v[0:1] +; GFX1250-NEXT: v_add_nc_u64_e32 v[2:3], s[10:11], v[0:1] +; GFX1250-NEXT: v_add_nc_u64_e32 v[0:1], s[8:9], v[0:1] ; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_2) ; GFX1250-NEXT: v_add_nc_u64_e32 v[2:3], 0xb0, v[2:3] ; GFX1250-NEXT: .LBB5_2: ; %for.body diff --git a/llvm/test/CodeGen/AMDGPU/mad_64_32.ll b/llvm/test/CodeGen/AMDGPU/mad_64_32.ll index dbcd370..08ec0c8 100644 --- a/llvm/test/CodeGen/AMDGPU/mad_64_32.ll +++ b/llvm/test/CodeGen/AMDGPU/mad_64_32.ll @@ -1117,18 +1117,19 @@ define amdgpu_kernel void @mad_i64_i32_uniform(ptr addrspace(1) %out, i32 %arg0, ; ; GFX1250-LABEL: mad_i64_i32_uniform: ; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_clause 0x1 ; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX1250-NEXT: s_load_b64 s[6:7], s[4:5], 0x34 ; GFX1250-NEXT: s_wait_xcnt 0x0 -; GFX1250-NEXT: s_load_b64 s[4:5], s[4:5], 0x34 -; GFX1250-NEXT: s_mov_b32 s7, 0 +; GFX1250-NEXT: s_mov_b32 s5, 0 ; GFX1250-NEXT: v_mov_b32_e32 v2, 0 ; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: s_mov_b32 s6, s2 +; GFX1250-NEXT: s_mov_b32 s4, s2 ; GFX1250-NEXT: s_mov_b32 s2, s3 -; GFX1250-NEXT: s_mov_b32 s3, s7 +; GFX1250-NEXT: s_mov_b32 s3, s5 ; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) -; GFX1250-NEXT: s_mul_u64 s[2:3], s[6:7], s[2:3] -; GFX1250-NEXT: s_add_nc_u64 s[2:3], s[2:3], s[4:5] +; GFX1250-NEXT: s_mul_u64 s[2:3], s[4:5], s[2:3] +; GFX1250-NEXT: s_add_nc_u64 s[2:3], s[2:3], s[6:7] ; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1250-NEXT: v_mov_b64_e32 v[0:1], s[2:3] ; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[0:1] diff --git a/llvm/test/CodeGen/AMDGPU/mai-hazards.mir b/llvm/test/CodeGen/AMDGPU/mai-hazards.mir index 61f2629..c19d5a6 100644 --- a/llvm/test/CodeGen/AMDGPU/mai-hazards.mir +++ b/llvm/test/CodeGen/AMDGPU/mai-hazards.mir @@ -33,7 +33,7 @@ name: asm_write_vgpr_accvgpr_write_read body: | bb.0: - INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 2228234 /* regdef:VGPR_32 */, def $vgpr0 + INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 2031626 /* regdef:VGPR_32 */, def $vgpr0 $agpr0 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr0, implicit $exec ... @@ -47,7 +47,7 @@ name: asm_write_vgpr_accvgpr_write_read_partialnop body: | bb.0: - INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 2228234 /* regdef:VGPR_32 */, def $vgpr0 + INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 2031626 /* regdef:VGPR_32 */, def $vgpr0 S_NOP 0 $agpr0 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr0, implicit $exec ... 
@@ -60,7 +60,7 @@ name: asm_write_vgpr_accvgpr_write_read_otherreg body: | bb.0: liveins: $vgpr0 - INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 2228234 /* regdef:VGPR_32 */, def $vgpr1 + INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 2031626 /* regdef:VGPR_32 */, def $vgpr1 $agpr0 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr0, implicit $exec ... diff --git a/llvm/test/CodeGen/AMDGPU/max.ll b/llvm/test/CodeGen/AMDGPU/max.ll index fef9a9a..ae08054 100644 --- a/llvm/test/CodeGen/AMDGPU/max.ll +++ b/llvm/test/CodeGen/AMDGPU/max.ll @@ -257,16 +257,15 @@ define amdgpu_kernel void @v_test_imax_sge_i8(ptr addrspace(1) %out, ptr addrspa ; ; GFX1250-LABEL: v_test_imax_sge_i8: ; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_clause 0x1 ; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; GFX1250-NEXT: s_wait_xcnt 0x0 -; GFX1250-NEXT: s_load_b64 s[4:5], s[4:5], 0x34 +; GFX1250-NEXT: s_load_b64 s[6:7], s[4:5], 0x34 ; GFX1250-NEXT: v_mov_b32_e32 v0, 0 ; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: s_load_i8 s2, s[2:3], 0x0 -; GFX1250-NEXT: s_wait_xcnt 0x0 -; GFX1250-NEXT: s_load_i8 s3, s[4:5], 0x0 +; GFX1250-NEXT: s_load_i8 s4, s[2:3], 0x0 +; GFX1250-NEXT: s_load_i8 s5, s[6:7], 0x0 ; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: s_max_i32 s2, s2, s3 +; GFX1250-NEXT: s_max_i32 s2, s4, s5 ; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1250-NEXT: v_mov_b32_e32 v1, s2 ; GFX1250-NEXT: global_store_b8 v0, v1, s[0:1] @@ -701,16 +700,15 @@ define amdgpu_kernel void @v_test_umax_uge_i8(ptr addrspace(1) %out, ptr addrspa ; ; GFX1250-LABEL: v_test_umax_uge_i8: ; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_clause 0x1 ; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; GFX1250-NEXT: s_wait_xcnt 0x0 -; GFX1250-NEXT: s_load_b64 s[4:5], s[4:5], 0x34 +; GFX1250-NEXT: s_load_b64 s[6:7], s[4:5], 0x34 ; GFX1250-NEXT: v_mov_b32_e32 v0, 0 ; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: s_load_u8 s2, s[2:3], 0x0 -; GFX1250-NEXT: s_wait_xcnt 0x0 -; GFX1250-NEXT: s_load_u8 s3, s[4:5], 0x0 +; GFX1250-NEXT: s_load_u8 s4, s[2:3], 0x0 +; GFX1250-NEXT: s_load_u8 s5, s[6:7], 0x0 ; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: s_max_u32 s2, s2, s3 +; GFX1250-NEXT: s_max_u32 s2, s4, s5 ; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1250-NEXT: v_mov_b32_e32 v1, s2 ; GFX1250-NEXT: global_store_b8 v0, v1, s[0:1] @@ -777,13 +775,12 @@ define amdgpu_kernel void @v_test_umax_ugt_i32(ptr addrspace(1) %out, ptr addrsp ; GFX1250-NEXT: v_mov_b32_e32 v1, 0 ; GFX1250-NEXT: s_wait_kmcnt 0x0 ; GFX1250-NEXT: global_load_b32 v0, v0, s[0:1] scale_offset -; GFX1250-NEXT: s_load_b32 s2, s[0:1], 0x0 -; GFX1250-NEXT: s_wait_xcnt 0x0 -; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 +; GFX1250-NEXT: s_load_b32 s6, s[0:1], 0x0 +; GFX1250-NEXT: s_load_b64 s[2:3], s[4:5], 0x24 ; GFX1250-NEXT: s_wait_loadcnt 0x0 ; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: v_max_u32_e32 v0, s2, v0 -; GFX1250-NEXT: global_store_b32 v1, v0, s[0:1] +; GFX1250-NEXT: v_max_u32_e32 v0, s6, v0 +; GFX1250-NEXT: global_store_b32 v1, v0, s[2:3] ; GFX1250-NEXT: s_endpgm ; ; EG-LABEL: v_test_umax_ugt_i32: @@ -1122,12 +1119,12 @@ define amdgpu_kernel void @test_umax_ugt_i64(ptr addrspace(1) %out, i64 %a, i64 ; ; GFX1250-LABEL: test_umax_ugt_i64: ; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_clause 0x1 ; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; GFX1250-NEXT: s_wait_xcnt 0x0 -; GFX1250-NEXT: s_load_b64 s[4:5], s[4:5], 0x34 +; GFX1250-NEXT: s_load_b64 s[6:7], s[4:5], 0x34 ; GFX1250-NEXT: v_mov_b32_e32 v2, 0 ; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: v_max_u64 
v[0:1], s[2:3], s[4:5] +; GFX1250-NEXT: v_max_u64 v[0:1], s[2:3], s[6:7] ; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[0:1] ; GFX1250-NEXT: s_endpgm ; @@ -1175,12 +1172,12 @@ define amdgpu_kernel void @test_umax_uge_i64(ptr addrspace(1) %out, i64 %a, i64 ; ; GFX1250-LABEL: test_umax_uge_i64: ; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_clause 0x1 ; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; GFX1250-NEXT: s_wait_xcnt 0x0 -; GFX1250-NEXT: s_load_b64 s[4:5], s[4:5], 0x34 +; GFX1250-NEXT: s_load_b64 s[6:7], s[4:5], 0x34 ; GFX1250-NEXT: v_mov_b32_e32 v2, 0 ; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: v_max_u64 v[0:1], s[2:3], s[4:5] +; GFX1250-NEXT: v_max_u64 v[0:1], s[2:3], s[6:7] ; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[0:1] ; GFX1250-NEXT: s_endpgm ; @@ -1228,12 +1225,12 @@ define amdgpu_kernel void @test_imax_sgt_i64(ptr addrspace(1) %out, i64 %a, i64 ; ; GFX1250-LABEL: test_imax_sgt_i64: ; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_clause 0x1 ; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; GFX1250-NEXT: s_wait_xcnt 0x0 -; GFX1250-NEXT: s_load_b64 s[4:5], s[4:5], 0x34 +; GFX1250-NEXT: s_load_b64 s[6:7], s[4:5], 0x34 ; GFX1250-NEXT: v_mov_b32_e32 v2, 0 ; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: v_max_i64 v[0:1], s[2:3], s[4:5] +; GFX1250-NEXT: v_max_i64 v[0:1], s[2:3], s[6:7] ; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[0:1] ; GFX1250-NEXT: s_endpgm ; @@ -1281,12 +1278,12 @@ define amdgpu_kernel void @test_imax_sge_i64(ptr addrspace(1) %out, i64 %a, i64 ; ; GFX1250-LABEL: test_imax_sge_i64: ; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_clause 0x1 ; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; GFX1250-NEXT: s_wait_xcnt 0x0 -; GFX1250-NEXT: s_load_b64 s[4:5], s[4:5], 0x34 +; GFX1250-NEXT: s_load_b64 s[6:7], s[4:5], 0x34 ; GFX1250-NEXT: v_mov_b32_e32 v2, 0 ; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: v_max_i64 v[0:1], s[2:3], s[4:5] +; GFX1250-NEXT: v_max_i64 v[0:1], s[2:3], s[6:7] ; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[0:1] ; GFX1250-NEXT: s_endpgm ; diff --git a/llvm/test/CodeGen/AMDGPU/min.ll b/llvm/test/CodeGen/AMDGPU/min.ll index 311527d..6a3d31f 100644 --- a/llvm/test/CodeGen/AMDGPU/min.ll +++ b/llvm/test/CodeGen/AMDGPU/min.ll @@ -131,14 +131,14 @@ define amdgpu_kernel void @v_test_imin_sle_i32(ptr addrspace(1) %out, ptr addrsp ; ; GFX1250-LABEL: v_test_imin_sle_i32: ; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_clause 0x1 ; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x0 -; GFX1250-NEXT: s_wait_xcnt 0x0 -; GFX1250-NEXT: s_load_b64 s[4:5], s[4:5], 0x10 +; GFX1250-NEXT: s_load_b64 s[6:7], s[4:5], 0x10 ; GFX1250-NEXT: v_and_b32_e32 v0, 0x3ff, v0 ; GFX1250-NEXT: s_wait_kmcnt 0x0 ; GFX1250-NEXT: s_clause 0x1 ; GFX1250-NEXT: global_load_b32 v1, v0, s[2:3] scale_offset -; GFX1250-NEXT: global_load_b32 v2, v0, s[4:5] scale_offset +; GFX1250-NEXT: global_load_b32 v2, v0, s[6:7] scale_offset ; GFX1250-NEXT: s_wait_loadcnt 0x0 ; GFX1250-NEXT: v_min_i32_e32 v1, v1, v2 ; GFX1250-NEXT: global_store_b32 v0, v1, s[0:1] scale_offset @@ -1172,14 +1172,14 @@ define amdgpu_kernel void @s_test_imin_sle_v4i16(ptr addrspace(1) %out, <4 x i16 ; ; GFX1250-LABEL: s_test_imin_sle_v4i16: ; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_clause 0x1 ; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x8 -; GFX1250-NEXT: s_wait_xcnt 0x0 -; GFX1250-NEXT: s_load_b64 s[4:5], s[4:5], 0x0 +; GFX1250-NEXT: s_load_b64 s[6:7], s[4:5], 0x0 ; GFX1250-NEXT: v_mov_b32_e32 v2, 0 ; GFX1250-NEXT: s_wait_kmcnt 0x0 ; GFX1250-NEXT: v_pk_min_i16 v1, s1, s3 ; GFX1250-NEXT: v_pk_min_i16 v0, s0, s2 -; GFX1250-NEXT: global_store_b64 v2, 
v[0:1], s[4:5] +; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[6:7] ; GFX1250-NEXT: s_endpgm %cmp = icmp sle <4 x i16> %a, %b %val = select <4 x i1> %cmp, <4 x i16> %a, <4 x i16> %b @@ -1307,14 +1307,14 @@ define amdgpu_kernel void @v_test_imin_slt_i32(ptr addrspace(1) %out, ptr addrsp ; ; GFX1250-LABEL: v_test_imin_slt_i32: ; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_clause 0x1 ; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x0 -; GFX1250-NEXT: s_wait_xcnt 0x0 -; GFX1250-NEXT: s_load_b64 s[4:5], s[4:5], 0x10 +; GFX1250-NEXT: s_load_b64 s[6:7], s[4:5], 0x10 ; GFX1250-NEXT: v_and_b32_e32 v0, 0x3ff, v0 ; GFX1250-NEXT: s_wait_kmcnt 0x0 ; GFX1250-NEXT: s_clause 0x1 ; GFX1250-NEXT: global_load_b32 v1, v0, s[2:3] scale_offset -; GFX1250-NEXT: global_load_b32 v2, v0, s[4:5] scale_offset +; GFX1250-NEXT: global_load_b32 v2, v0, s[6:7] scale_offset ; GFX1250-NEXT: s_wait_loadcnt 0x0 ; GFX1250-NEXT: v_min_i32_e32 v1, v1, v2 ; GFX1250-NEXT: global_store_b32 v0, v1, s[0:1] scale_offset @@ -1484,14 +1484,14 @@ define amdgpu_kernel void @v_test_imin_slt_i16(ptr addrspace(1) %out, ptr addrsp ; ; GFX1250-LABEL: v_test_imin_slt_i16: ; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_clause 0x1 ; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x0 -; GFX1250-NEXT: s_wait_xcnt 0x0 -; GFX1250-NEXT: s_load_b64 s[4:5], s[4:5], 0x10 +; GFX1250-NEXT: s_load_b64 s[6:7], s[4:5], 0x10 ; GFX1250-NEXT: v_and_b32_e32 v0, 0x3ff, v0 ; GFX1250-NEXT: s_wait_kmcnt 0x0 ; GFX1250-NEXT: s_clause 0x1 ; GFX1250-NEXT: global_load_u16 v1, v0, s[2:3] scale_offset -; GFX1250-NEXT: global_load_u16 v2, v0, s[4:5] scale_offset +; GFX1250-NEXT: global_load_u16 v2, v0, s[6:7] scale_offset ; GFX1250-NEXT: s_wait_loadcnt 0x0 ; GFX1250-NEXT: v_min_i16 v1, v1, v2 ; GFX1250-NEXT: global_store_b16 v0, v1, s[0:1] scale_offset @@ -1686,16 +1686,16 @@ define amdgpu_kernel void @s_test_imin_slt_v2i32(ptr addrspace(1) %out, <2 x i32 ; ; GFX1250-LABEL: s_test_imin_slt_v2i32: ; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_clause 0x1 ; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x8 -; GFX1250-NEXT: s_wait_xcnt 0x0 -; GFX1250-NEXT: s_load_b64 s[4:5], s[4:5], 0x0 +; GFX1250-NEXT: s_load_b64 s[6:7], s[4:5], 0x0 ; GFX1250-NEXT: v_mov_b32_e32 v2, 0 ; GFX1250-NEXT: s_wait_kmcnt 0x0 ; GFX1250-NEXT: s_min_i32 s0, s0, s2 ; GFX1250-NEXT: s_min_i32 s1, s1, s3 ; GFX1250-NEXT: v_mov_b32_e32 v0, s0 ; GFX1250-NEXT: v_mov_b32_e32 v1, s1 -; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[4:5] +; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[6:7] ; GFX1250-NEXT: s_endpgm %cmp = icmp slt <2 x i32> %a, %b %val = select <2 x i1> %cmp, <2 x i32> %a, <2 x i32> %b @@ -2011,14 +2011,14 @@ define amdgpu_kernel void @v_test_umin_ule_i32(ptr addrspace(1) %out, ptr addrsp ; ; GFX1250-LABEL: v_test_umin_ule_i32: ; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_clause 0x1 ; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x0 -; GFX1250-NEXT: s_wait_xcnt 0x0 -; GFX1250-NEXT: s_load_b64 s[4:5], s[4:5], 0x10 +; GFX1250-NEXT: s_load_b64 s[6:7], s[4:5], 0x10 ; GFX1250-NEXT: v_and_b32_e32 v0, 0x3ff, v0 ; GFX1250-NEXT: s_wait_kmcnt 0x0 ; GFX1250-NEXT: s_clause 0x1 ; GFX1250-NEXT: global_load_b32 v1, v0, s[2:3] scale_offset -; GFX1250-NEXT: global_load_b32 v2, v0, s[4:5] scale_offset +; GFX1250-NEXT: global_load_b32 v2, v0, s[6:7] scale_offset ; GFX1250-NEXT: s_wait_loadcnt 0x0 ; GFX1250-NEXT: v_min_u32_e32 v1, v1, v2 ; GFX1250-NEXT: global_store_b32 v0, v1, s[0:1] scale_offset @@ -2171,16 +2171,16 @@ define amdgpu_kernel void @v_test_umin_ule_v3i32(ptr addrspace(1) %out, ptr addr ; ; GFX1250-LABEL: v_test_umin_ule_v3i32: ; GFX1250: ; %bb.0: 
+; GFX1250-NEXT: s_clause 0x1 ; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x0 -; GFX1250-NEXT: s_wait_xcnt 0x0 -; GFX1250-NEXT: s_load_b64 s[4:5], s[4:5], 0x10 +; GFX1250-NEXT: s_load_b64 s[6:7], s[4:5], 0x10 ; GFX1250-NEXT: v_and_b32_e32 v0, 0x3ff, v0 ; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-NEXT: v_lshlrev_b32_e32 v3, 4, v0 ; GFX1250-NEXT: s_wait_kmcnt 0x0 ; GFX1250-NEXT: s_clause 0x1 ; GFX1250-NEXT: global_load_b96 v[0:2], v3, s[2:3] -; GFX1250-NEXT: global_load_b96 v[4:6], v3, s[4:5] +; GFX1250-NEXT: global_load_b96 v[4:6], v3, s[6:7] ; GFX1250-NEXT: s_wait_loadcnt 0x0 ; GFX1250-NEXT: v_min_u32_e32 v2, v2, v6 ; GFX1250-NEXT: v_min_u32_e32 v1, v1, v5 @@ -2374,14 +2374,14 @@ define amdgpu_kernel void @v_test_umin_ule_v3i16(ptr addrspace(1) %out, ptr addr ; ; GFX1250-LABEL: v_test_umin_ule_v3i16: ; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_clause 0x1 ; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x0 -; GFX1250-NEXT: s_wait_xcnt 0x0 -; GFX1250-NEXT: s_load_b64 s[4:5], s[4:5], 0x10 +; GFX1250-NEXT: s_load_b64 s[6:7], s[4:5], 0x10 ; GFX1250-NEXT: v_and_b32_e32 v4, 0x3ff, v0 ; GFX1250-NEXT: s_wait_kmcnt 0x0 ; GFX1250-NEXT: s_clause 0x1 ; GFX1250-NEXT: global_load_b64 v[0:1], v4, s[2:3] scale_offset -; GFX1250-NEXT: global_load_b64 v[2:3], v4, s[4:5] scale_offset +; GFX1250-NEXT: global_load_b64 v[2:3], v4, s[6:7] scale_offset ; GFX1250-NEXT: s_wait_xcnt 0x0 ; GFX1250-NEXT: v_lshlrev_b32_e32 v4, 3, v4 ; GFX1250-NEXT: s_wait_loadcnt 0x0 @@ -2611,14 +2611,14 @@ define amdgpu_kernel void @v_test_umin_ult_i32(ptr addrspace(1) %out, ptr addrsp ; ; GFX1250-LABEL: v_test_umin_ult_i32: ; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_clause 0x1 ; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x0 -; GFX1250-NEXT: s_wait_xcnt 0x0 -; GFX1250-NEXT: s_load_b64 s[4:5], s[4:5], 0x10 +; GFX1250-NEXT: s_load_b64 s[6:7], s[4:5], 0x10 ; GFX1250-NEXT: v_and_b32_e32 v0, 0x3ff, v0 ; GFX1250-NEXT: s_wait_kmcnt 0x0 ; GFX1250-NEXT: s_clause 0x1 ; GFX1250-NEXT: global_load_b32 v1, v0, s[2:3] scale_offset -; GFX1250-NEXT: global_load_b32 v2, v0, s[4:5] scale_offset +; GFX1250-NEXT: global_load_b32 v2, v0, s[6:7] scale_offset ; GFX1250-NEXT: s_wait_loadcnt 0x0 ; GFX1250-NEXT: v_min_u32_e32 v1, v1, v2 ; GFX1250-NEXT: global_store_b32 v0, v1, s[0:1] scale_offset @@ -2771,14 +2771,14 @@ define amdgpu_kernel void @v_test_umin_ult_i8(ptr addrspace(1) %out, ptr addrspa ; ; GFX1250-LABEL: v_test_umin_ult_i8: ; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_clause 0x1 ; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x0 -; GFX1250-NEXT: s_wait_xcnt 0x0 -; GFX1250-NEXT: s_load_b64 s[4:5], s[4:5], 0x10 +; GFX1250-NEXT: s_load_b64 s[6:7], s[4:5], 0x10 ; GFX1250-NEXT: v_and_b32_e32 v0, 0x3ff, v0 ; GFX1250-NEXT: s_wait_kmcnt 0x0 ; GFX1250-NEXT: s_clause 0x1 ; GFX1250-NEXT: global_load_u8 v1, v0, s[2:3] -; GFX1250-NEXT: global_load_u8 v2, v0, s[4:5] +; GFX1250-NEXT: global_load_u8 v2, v0, s[6:7] ; GFX1250-NEXT: s_wait_loadcnt 0x0 ; GFX1250-NEXT: v_min_u16 v1, v1, v2 ; GFX1250-NEXT: global_store_b8 v0, v1, s[0:1] @@ -3023,23 +3023,22 @@ define amdgpu_kernel void @v_test_umin_ult_i32_multi_use(ptr addrspace(1) %out0, ; ; GFX1250-LABEL: v_test_umin_ult_i32_multi_use: ; GFX1250: ; %bb.0: -; GFX1250-NEXT: s_load_b256 s[0:7], s[4:5], 0x0 +; GFX1250-NEXT: s_load_b256 s[8:15], s[4:5], 0x0 ; GFX1250-NEXT: v_mov_b32_e32 v1, 0 ; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: s_load_b32 s4, s[4:5], 0x0 -; GFX1250-NEXT: s_wait_xcnt 0x0 -; GFX1250-NEXT: s_load_b32 s5, s[6:7], 0x0 +; GFX1250-NEXT: s_load_b32 s0, s[12:13], 0x0 +; GFX1250-NEXT: s_load_b32 s1, s[14:15], 
0x0 ; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: s_cmp_lt_u32 s4, s5 -; GFX1250-NEXT: s_cselect_b32 s6, -1, 0 +; GFX1250-NEXT: s_cmp_lt_u32 s0, s1 +; GFX1250-NEXT: s_cselect_b32 s2, -1, 0 ; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1) -; GFX1250-NEXT: v_cndmask_b32_e64 v0, 0, 1, s6 -; GFX1250-NEXT: s_and_b32 s6, s6, exec_lo -; GFX1250-NEXT: s_cselect_b32 s4, s4, s5 -; GFX1250-NEXT: v_mov_b32_e32 v2, s4 +; GFX1250-NEXT: v_cndmask_b32_e64 v0, 0, 1, s2 +; GFX1250-NEXT: s_and_b32 s2, s2, exec_lo +; GFX1250-NEXT: s_cselect_b32 s0, s0, s1 +; GFX1250-NEXT: v_mov_b32_e32 v2, s0 ; GFX1250-NEXT: s_clause 0x1 -; GFX1250-NEXT: global_store_b32 v1, v2, s[0:1] -; GFX1250-NEXT: global_store_b8 v1, v0, s[2:3] +; GFX1250-NEXT: global_store_b32 v1, v2, s[8:9] +; GFX1250-NEXT: global_store_b8 v1, v0, s[10:11] ; GFX1250-NEXT: s_endpgm %a = load i32, ptr addrspace(1) %aptr, align 4 %b = load i32, ptr addrspace(1) %bptr, align 4 @@ -3220,12 +3219,12 @@ define amdgpu_kernel void @v_test_umin_ult_i16_multi_use(ptr addrspace(1) %out0, ; ; GFX1250-LABEL: v_test_umin_ult_i16_multi_use: ; GFX1250: ; %bb.0: -; GFX1250-NEXT: s_load_b256 s[0:7], s[4:5], 0x0 +; GFX1250-NEXT: s_load_b256 s[8:15], s[4:5], 0x0 ; GFX1250-NEXT: v_mov_b32_e32 v0, 0 ; GFX1250-NEXT: s_wait_kmcnt 0x0 ; GFX1250-NEXT: s_clause 0x1 -; GFX1250-NEXT: global_load_u16 v1, v0, s[6:7] -; GFX1250-NEXT: global_load_u16 v2, v0, s[4:5] +; GFX1250-NEXT: global_load_u16 v1, v0, s[14:15] +; GFX1250-NEXT: global_load_u16 v2, v0, s[12:13] ; GFX1250-NEXT: s_wait_loadcnt 0x1 ; GFX1250-NEXT: v_and_b32_e32 v3, 0xffff, v1 ; GFX1250-NEXT: s_wait_loadcnt 0x0 @@ -3235,8 +3234,8 @@ define amdgpu_kernel void @v_test_umin_ult_i16_multi_use(ptr addrspace(1) %out0, ; GFX1250-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc_lo ; GFX1250-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc_lo ; GFX1250-NEXT: s_clause 0x1 -; GFX1250-NEXT: global_store_b16 v0, v1, s[0:1] -; GFX1250-NEXT: global_store_b8 v0, v2, s[2:3] +; GFX1250-NEXT: global_store_b16 v0, v1, s[8:9] +; GFX1250-NEXT: global_store_b8 v0, v2, s[10:11] ; GFX1250-NEXT: s_endpgm %a = load i16, ptr addrspace(1) %aptr, align 2 %b = load i16, ptr addrspace(1) %bptr, align 2 @@ -4338,12 +4337,12 @@ define amdgpu_kernel void @test_umin_ult_i64(ptr addrspace(1) %out, i64 %a, i64 ; ; GFX1250-LABEL: test_umin_ult_i64: ; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_clause 0x1 ; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x0 -; GFX1250-NEXT: s_wait_xcnt 0x0 -; GFX1250-NEXT: s_load_b64 s[4:5], s[4:5], 0x10 +; GFX1250-NEXT: s_load_b64 s[6:7], s[4:5], 0x10 ; GFX1250-NEXT: v_mov_b32_e32 v2, 0 ; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: v_min_u64 v[0:1], s[2:3], s[4:5] +; GFX1250-NEXT: v_min_u64 v[0:1], s[2:3], s[6:7] ; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[0:1] ; GFX1250-NEXT: s_endpgm %tmp = icmp ult i64 %a, %b @@ -4462,12 +4461,12 @@ define amdgpu_kernel void @test_umin_ule_i64(ptr addrspace(1) %out, i64 %a, i64 ; ; GFX1250-LABEL: test_umin_ule_i64: ; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_clause 0x1 ; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x0 -; GFX1250-NEXT: s_wait_xcnt 0x0 -; GFX1250-NEXT: s_load_b64 s[4:5], s[4:5], 0x10 +; GFX1250-NEXT: s_load_b64 s[6:7], s[4:5], 0x10 ; GFX1250-NEXT: v_mov_b32_e32 v2, 0 ; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: v_min_u64 v[0:1], s[2:3], s[4:5] +; GFX1250-NEXT: v_min_u64 v[0:1], s[2:3], s[6:7] ; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[0:1] ; GFX1250-NEXT: s_endpgm %tmp = icmp ule i64 %a, %b @@ -4586,12 +4585,12 @@ define amdgpu_kernel void 
@test_imin_slt_i64(ptr addrspace(1) %out, i64 %a, i64 ; ; GFX1250-LABEL: test_imin_slt_i64: ; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_clause 0x1 ; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x0 -; GFX1250-NEXT: s_wait_xcnt 0x0 -; GFX1250-NEXT: s_load_b64 s[4:5], s[4:5], 0x10 +; GFX1250-NEXT: s_load_b64 s[6:7], s[4:5], 0x10 ; GFX1250-NEXT: v_mov_b32_e32 v2, 0 ; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: v_min_i64 v[0:1], s[2:3], s[4:5] +; GFX1250-NEXT: v_min_i64 v[0:1], s[2:3], s[6:7] ; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[0:1] ; GFX1250-NEXT: s_endpgm %tmp = icmp slt i64 %a, %b @@ -4710,12 +4709,12 @@ define amdgpu_kernel void @test_imin_sle_i64(ptr addrspace(1) %out, i64 %a, i64 ; ; GFX1250-LABEL: test_imin_sle_i64: ; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_clause 0x1 ; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x0 -; GFX1250-NEXT: s_wait_xcnt 0x0 -; GFX1250-NEXT: s_load_b64 s[4:5], s[4:5], 0x10 +; GFX1250-NEXT: s_load_b64 s[6:7], s[4:5], 0x10 ; GFX1250-NEXT: v_mov_b32_e32 v2, 0 ; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: v_min_i64 v[0:1], s[2:3], s[4:5] +; GFX1250-NEXT: v_min_i64 v[0:1], s[2:3], s[6:7] ; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[0:1] ; GFX1250-NEXT: s_endpgm %tmp = icmp sle i64 %a, %b @@ -4872,14 +4871,14 @@ define amdgpu_kernel void @v_test_imin_sle_v2i16(ptr addrspace(1) %out, ptr addr ; ; GFX1250-LABEL: v_test_imin_sle_v2i16: ; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_clause 0x1 ; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x0 -; GFX1250-NEXT: s_wait_xcnt 0x0 -; GFX1250-NEXT: s_load_b64 s[4:5], s[4:5], 0x10 +; GFX1250-NEXT: s_load_b64 s[6:7], s[4:5], 0x10 ; GFX1250-NEXT: v_and_b32_e32 v0, 0x3ff, v0 ; GFX1250-NEXT: s_wait_kmcnt 0x0 ; GFX1250-NEXT: s_clause 0x1 ; GFX1250-NEXT: global_load_b32 v1, v0, s[2:3] scale_offset -; GFX1250-NEXT: global_load_b32 v2, v0, s[4:5] scale_offset +; GFX1250-NEXT: global_load_b32 v2, v0, s[6:7] scale_offset ; GFX1250-NEXT: s_wait_loadcnt 0x0 ; GFX1250-NEXT: v_pk_min_i16 v1, v1, v2 ; GFX1250-NEXT: global_store_b32 v0, v1, s[0:1] scale_offset @@ -5042,14 +5041,14 @@ define amdgpu_kernel void @v_test_imin_ule_v2i16(ptr addrspace(1) %out, ptr addr ; ; GFX1250-LABEL: v_test_imin_ule_v2i16: ; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_clause 0x1 ; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x0 -; GFX1250-NEXT: s_wait_xcnt 0x0 -; GFX1250-NEXT: s_load_b64 s[4:5], s[4:5], 0x10 +; GFX1250-NEXT: s_load_b64 s[6:7], s[4:5], 0x10 ; GFX1250-NEXT: v_and_b32_e32 v0, 0x3ff, v0 ; GFX1250-NEXT: s_wait_kmcnt 0x0 ; GFX1250-NEXT: s_clause 0x1 ; GFX1250-NEXT: global_load_b32 v1, v0, s[2:3] scale_offset -; GFX1250-NEXT: global_load_b32 v2, v0, s[4:5] scale_offset +; GFX1250-NEXT: global_load_b32 v2, v0, s[6:7] scale_offset ; GFX1250-NEXT: s_wait_loadcnt 0x0 ; GFX1250-NEXT: v_pk_min_u16 v1, v1, v2 ; GFX1250-NEXT: global_store_b32 v0, v1, s[0:1] scale_offset diff --git a/llvm/test/CodeGen/AMDGPU/mul.ll b/llvm/test/CodeGen/AMDGPU/mul.ll index baccb4c..d29847e 100644 --- a/llvm/test/CodeGen/AMDGPU/mul.ll +++ b/llvm/test/CodeGen/AMDGPU/mul.ll @@ -450,6 +450,7 @@ define amdgpu_kernel void @s_trunc_i64_mul_to_i32(ptr addrspace(1) %out, i64 %a, ; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 ; GFX1250-NEXT: s_wait_kmcnt 0x0 ; GFX1250-NEXT: s_load_b32 s3, s[4:5], 0x34 +; GFX1250-NEXT: ; kill: killed $sgpr4_sgpr5 ; GFX1250-NEXT: s_wait_kmcnt 0x0 ; GFX1250-NEXT: s_mul_i32 s2, s3, s2 ; GFX1250-NEXT: s_mov_b32 s3, 0x31016000 @@ -613,25 +614,25 @@ define amdgpu_kernel void @v_trunc_i64_mul_to_i32(ptr addrspace(1) %out, ptr add ; ; GFX1250-LABEL: v_trunc_i64_mul_to_i32: ; 
GFX1250: ; %bb.0: ; %entry +; GFX1250-NEXT: s_clause 0x1 ; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; GFX1250-NEXT: s_wait_xcnt 0x0 -; GFX1250-NEXT: s_load_b64 s[4:5], s[4:5], 0x34 -; GFX1250-NEXT: s_mov_b32 s10, -1 -; GFX1250-NEXT: s_mov_b32 s11, 0x31016000 -; GFX1250-NEXT: s_mov_b32 s14, s10 -; GFX1250-NEXT: s_mov_b32 s15, s11 -; GFX1250-NEXT: s_mov_b32 s6, s10 -; GFX1250-NEXT: s_mov_b32 s7, s11 +; GFX1250-NEXT: s_load_b64 s[8:9], s[4:5], 0x34 +; GFX1250-NEXT: s_mov_b32 s6, -1 +; GFX1250-NEXT: s_mov_b32 s7, 0x31016000 +; GFX1250-NEXT: s_mov_b32 s14, s6 +; GFX1250-NEXT: s_mov_b32 s15, s7 +; GFX1250-NEXT: s_mov_b32 s10, s6 +; GFX1250-NEXT: s_mov_b32 s11, s7 ; GFX1250-NEXT: s_wait_kmcnt 0x0 ; GFX1250-NEXT: s_mov_b32 s12, s2 ; GFX1250-NEXT: s_mov_b32 s13, s3 ; GFX1250-NEXT: buffer_load_b32 v0, off, s[12:15], null -; GFX1250-NEXT: buffer_load_b32 v1, off, s[4:7], null -; GFX1250-NEXT: s_mov_b32 s8, s0 -; GFX1250-NEXT: s_mov_b32 s9, s1 +; GFX1250-NEXT: buffer_load_b32 v1, off, s[8:11], null +; GFX1250-NEXT: s_mov_b32 s4, s0 +; GFX1250-NEXT: s_mov_b32 s5, s1 ; GFX1250-NEXT: s_wait_loadcnt 0x0 ; GFX1250-NEXT: v_mul_lo_u32 v0, v1, v0 -; GFX1250-NEXT: buffer_store_b32 v0, off, s[8:11], null +; GFX1250-NEXT: buffer_store_b32 v0, off, s[4:7], null ; GFX1250-NEXT: s_endpgm ; ; EG-LABEL: v_trunc_i64_mul_to_i32: @@ -2091,11 +2092,11 @@ define amdgpu_kernel void @s_mul_i64(ptr addrspace(1) %out, i64 %a, i64 %b) noun ; ; GFX1250-LABEL: s_mul_i64: ; GFX1250: ; %bb.0: ; %entry +; GFX1250-NEXT: s_clause 0x1 ; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; GFX1250-NEXT: s_wait_xcnt 0x0 -; GFX1250-NEXT: s_load_b64 s[4:5], s[4:5], 0x34 +; GFX1250-NEXT: s_load_b64 s[6:7], s[4:5], 0x34 ; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: s_mul_u64 s[4:5], s[2:3], s[4:5] +; GFX1250-NEXT: s_mul_u64 s[4:5], s[2:3], s[6:7] ; GFX1250-NEXT: s_mov_b32 s3, 0x31016000 ; GFX1250-NEXT: v_mov_b64_e32 v[0:1], s[4:5] ; GFX1250-NEXT: s_mov_b32 s2, -1 @@ -2292,25 +2293,25 @@ define amdgpu_kernel void @v_mul_i64(ptr addrspace(1) %out, ptr addrspace(1) %ap ; ; GFX1250-LABEL: v_mul_i64: ; GFX1250: ; %bb.0: ; %entry +; GFX1250-NEXT: s_clause 0x1 ; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; GFX1250-NEXT: s_wait_xcnt 0x0 -; GFX1250-NEXT: s_load_b64 s[4:5], s[4:5], 0x34 -; GFX1250-NEXT: s_mov_b32 s10, -1 -; GFX1250-NEXT: s_mov_b32 s11, 0x31016000 -; GFX1250-NEXT: s_mov_b32 s14, s10 -; GFX1250-NEXT: s_mov_b32 s15, s11 -; GFX1250-NEXT: s_mov_b32 s6, s10 -; GFX1250-NEXT: s_mov_b32 s7, s11 +; GFX1250-NEXT: s_load_b64 s[8:9], s[4:5], 0x34 +; GFX1250-NEXT: s_mov_b32 s6, -1 +; GFX1250-NEXT: s_mov_b32 s7, 0x31016000 +; GFX1250-NEXT: s_mov_b32 s14, s6 +; GFX1250-NEXT: s_mov_b32 s15, s7 +; GFX1250-NEXT: s_mov_b32 s10, s6 +; GFX1250-NEXT: s_mov_b32 s11, s7 ; GFX1250-NEXT: s_wait_kmcnt 0x0 ; GFX1250-NEXT: s_mov_b32 s12, s2 ; GFX1250-NEXT: s_mov_b32 s13, s3 ; GFX1250-NEXT: buffer_load_b64 v[0:1], off, s[12:15], null -; GFX1250-NEXT: buffer_load_b64 v[2:3], off, s[4:7], null -; GFX1250-NEXT: s_mov_b32 s8, s0 -; GFX1250-NEXT: s_mov_b32 s9, s1 +; GFX1250-NEXT: buffer_load_b64 v[2:3], off, s[8:11], null +; GFX1250-NEXT: s_mov_b32 s4, s0 +; GFX1250-NEXT: s_mov_b32 s5, s1 ; GFX1250-NEXT: s_wait_loadcnt 0x0 ; GFX1250-NEXT: v_mul_u64_e32 v[0:1], v[0:1], v[2:3] -; GFX1250-NEXT: buffer_store_b64 v[0:1], off, s[8:11], null +; GFX1250-NEXT: buffer_store_b64 v[0:1], off, s[4:7], null ; GFX1250-NEXT: s_endpgm ; ; EG-LABEL: v_mul_i64: @@ -2845,30 +2846,30 @@ define amdgpu_kernel void @mul64_in_branch(ptr addrspace(1) %out, ptr addrspace( ; ; 
GFX1250-LABEL: mul64_in_branch: ; GFX1250: ; %bb.0: ; %entry -; GFX1250-NEXT: s_load_b256 s[0:7], s[4:5], 0x24 +; GFX1250-NEXT: s_load_b256 s[8:15], s[4:5], 0x24 ; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: s_cmp_lg_u64 s[4:5], 0 +; GFX1250-NEXT: s_cmp_lg_u64 s[12:13], 0 ; GFX1250-NEXT: s_cbranch_scc0 .LBB16_3 ; GFX1250-NEXT: ; %bb.1: ; %else -; GFX1250-NEXT: s_mul_u64 s[4:5], s[4:5], s[6:7] +; GFX1250-NEXT: s_mul_u64 s[0:1], s[12:13], s[14:15] ; GFX1250-NEXT: s_cbranch_execnz .LBB16_4 ; GFX1250-NEXT: .LBB16_2: ; %if -; GFX1250-NEXT: s_mov_b32 s7, 0x31016000 -; GFX1250-NEXT: s_mov_b32 s6, -1 -; GFX1250-NEXT: s_mov_b32 s4, s2 -; GFX1250-NEXT: s_mov_b32 s5, s3 -; GFX1250-NEXT: buffer_load_b64 v[0:1], off, s[4:7], null +; GFX1250-NEXT: s_mov_b32 s3, 0x31016000 +; GFX1250-NEXT: s_mov_b32 s2, -1 +; GFX1250-NEXT: s_mov_b32 s0, s10 +; GFX1250-NEXT: s_mov_b32 s1, s11 +; GFX1250-NEXT: buffer_load_b64 v[0:1], off, s[0:3], null ; GFX1250-NEXT: s_branch .LBB16_5 ; GFX1250-NEXT: .LBB16_3: -; GFX1250-NEXT: ; implicit-def: $sgpr4_sgpr5 +; GFX1250-NEXT: ; implicit-def: $sgpr0_sgpr1 ; GFX1250-NEXT: s_branch .LBB16_2 ; GFX1250-NEXT: .LBB16_4: -; GFX1250-NEXT: v_mov_b64_e32 v[0:1], s[4:5] +; GFX1250-NEXT: v_mov_b64_e32 v[0:1], s[0:1] ; GFX1250-NEXT: .LBB16_5: ; %endif -; GFX1250-NEXT: s_mov_b32 s3, 0x31016000 -; GFX1250-NEXT: s_mov_b32 s2, -1 +; GFX1250-NEXT: s_mov_b32 s11, 0x31016000 +; GFX1250-NEXT: s_mov_b32 s10, -1 ; GFX1250-NEXT: s_wait_loadcnt 0x0 -; GFX1250-NEXT: buffer_store_b64 v[0:1], off, s[0:3], null +; GFX1250-NEXT: buffer_store_b64 v[0:1], off, s[8:11], null ; GFX1250-NEXT: s_endpgm ; ; EG-LABEL: mul64_in_branch: diff --git a/llvm/test/CodeGen/AMDGPU/packed-fp32.ll b/llvm/test/CodeGen/AMDGPU/packed-fp32.ll index b0651ef..78207c2 100644 --- a/llvm/test/CodeGen/AMDGPU/packed-fp32.ll +++ b/llvm/test/CodeGen/AMDGPU/packed-fp32.ll @@ -340,46 +340,46 @@ define amdgpu_kernel void @fadd_v32_vs(ptr addrspace(1) %a, <32 x float> %x) { ; ; GFX1250-SDAG-LABEL: fadd_v32_vs: ; GFX1250-SDAG: ; %bb.0: -; GFX1250-SDAG-NEXT: s_load_b64 s[34:35], s[4:5], 0x24 +; GFX1250-SDAG-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 ; GFX1250-SDAG-NEXT: v_and_b32_e32 v0, 0x3ff, v0 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_lshlrev_b32_e32 v56, 7, v0 ; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0 ; GFX1250-SDAG-NEXT: s_clause 0x7 -; GFX1250-SDAG-NEXT: global_load_b128 v[0:3], v56, s[34:35] offset:16 -; GFX1250-SDAG-NEXT: global_load_b128 v[4:7], v56, s[34:35] offset:48 -; GFX1250-SDAG-NEXT: global_load_b128 v[8:11], v56, s[34:35] offset:32 -; GFX1250-SDAG-NEXT: global_load_b128 v[12:15], v56, s[34:35] -; GFX1250-SDAG-NEXT: global_load_b128 v[16:19], v56, s[34:35] offset:80 -; GFX1250-SDAG-NEXT: global_load_b128 v[20:23], v56, s[34:35] offset:96 -; GFX1250-SDAG-NEXT: global_load_b128 v[24:27], v56, s[34:35] offset:64 -; GFX1250-SDAG-NEXT: global_load_b128 v[28:31], v56, s[34:35] offset:112 -; GFX1250-SDAG-NEXT: s_load_b512 s[16:31], s[4:5], 0xa4 -; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0 -; GFX1250-SDAG-NEXT: s_load_b512 s[0:15], s[4:5], 0xe4 +; GFX1250-SDAG-NEXT: global_load_b128 v[0:3], v56, s[0:1] offset:16 +; GFX1250-SDAG-NEXT: global_load_b128 v[4:7], v56, s[0:1] offset:48 +; GFX1250-SDAG-NEXT: global_load_b128 v[8:11], v56, s[0:1] offset:32 +; GFX1250-SDAG-NEXT: global_load_b128 v[12:15], v56, s[0:1] +; GFX1250-SDAG-NEXT: global_load_b128 v[16:19], v56, s[0:1] offset:80 +; GFX1250-SDAG-NEXT: global_load_b128 v[20:23], v56, s[0:1] offset:96 +; GFX1250-SDAG-NEXT: global_load_b128 v[24:27], v56, s[0:1] 
offset:64 +; GFX1250-SDAG-NEXT: global_load_b128 v[28:31], v56, s[0:1] offset:112 +; GFX1250-SDAG-NEXT: s_clause 0x1 +; GFX1250-SDAG-NEXT: s_load_b512 s[36:51], s[4:5], 0xa4 +; GFX1250-SDAG-NEXT: s_load_b512 s[8:23], s[4:5], 0xe4 ; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0 -; GFX1250-SDAG-NEXT: v_dual_mov_b32 v32, s20 :: v_dual_mov_b32 v33, s21 -; GFX1250-SDAG-NEXT: v_dual_mov_b32 v34, s22 :: v_dual_mov_b32 v35, s23 -; GFX1250-SDAG-NEXT: v_dual_mov_b32 v36, s18 :: v_dual_mov_b32 v39, s29 -; GFX1250-SDAG-NEXT: v_dual_mov_b32 v40, s30 :: v_dual_mov_b32 v41, s31 -; GFX1250-SDAG-NEXT: v_dual_mov_b32 v42, s24 :: v_dual_mov_b32 v37, s19 -; GFX1250-SDAG-NEXT: v_dual_mov_b32 v38, s28 :: v_dual_mov_b32 v55, s15 -; GFX1250-SDAG-NEXT: v_dual_mov_b32 v51, s3 :: v_dual_mov_b32 v52, s12 -; GFX1250-SDAG-NEXT: v_dual_mov_b32 v53, s13 :: v_dual_mov_b32 v54, s14 -; GFX1250-SDAG-NEXT: v_dual_mov_b32 v49, s7 :: v_dual_mov_b32 v50, s2 -; GFX1250-SDAG-NEXT: v_dual_mov_b32 v45, s27 :: v_dual_mov_b32 v46, s4 -; GFX1250-SDAG-NEXT: v_dual_mov_b32 v47, s5 :: v_dual_mov_b32 v48, s6 -; GFX1250-SDAG-NEXT: v_dual_mov_b32 v43, s25 :: v_dual_mov_b32 v44, s26 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v32, s40 :: v_dual_mov_b32 v33, s41 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v34, s42 :: v_dual_mov_b32 v35, s43 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v36, s38 :: v_dual_mov_b32 v39, s49 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v40, s50 :: v_dual_mov_b32 v41, s51 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v42, s44 :: v_dual_mov_b32 v37, s39 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v38, s48 :: v_dual_mov_b32 v55, s23 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v51, s11 :: v_dual_mov_b32 v52, s20 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v53, s21 :: v_dual_mov_b32 v54, s22 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v49, s15 :: v_dual_mov_b32 v50, s10 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v45, s47 :: v_dual_mov_b32 v46, s12 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v47, s13 :: v_dual_mov_b32 v48, s14 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v43, s45 :: v_dual_mov_b32 v44, s46 ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x7 ; GFX1250-SDAG-NEXT: v_pk_add_f32 v[0:1], v[0:1], v[32:33] ; GFX1250-SDAG-NEXT: v_pk_add_f32 v[2:3], v[2:3], v[34:35] -; GFX1250-SDAG-NEXT: v_dual_mov_b32 v32, s8 :: v_dual_mov_b32 v33, s9 -; GFX1250-SDAG-NEXT: v_dual_mov_b32 v34, s10 :: v_dual_mov_b32 v35, s11 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v32, s16 :: v_dual_mov_b32 v33, s17 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v34, s18 :: v_dual_mov_b32 v35, s19 ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x6 ; GFX1250-SDAG-NEXT: v_pk_add_f32 v[6:7], v[6:7], v[40:41] -; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[40:41], s[0:1] +; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[40:41], s[8:9] ; GFX1250-SDAG-NEXT: v_pk_add_f32 v[4:5], v[4:5], v[38:39] -; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[38:39], s[16:17] +; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[38:39], s[36:37] ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x2 ; GFX1250-SDAG-NEXT: v_pk_add_f32 v[20:21], v[20:21], v[32:33] ; GFX1250-SDAG-NEXT: v_pk_add_f32 v[22:23], v[22:23], v[34:35] @@ -395,58 +395,58 @@ define amdgpu_kernel void @fadd_v32_vs(ptr addrspace(1) %a, <32 x float> %x) { ; GFX1250-SDAG-NEXT: v_pk_add_f32 v[14:15], v[14:15], v[36:37] ; GFX1250-SDAG-NEXT: v_pk_add_f32 v[12:13], v[12:13], v[38:39] ; GFX1250-SDAG-NEXT: s_clause 0x7 -; GFX1250-SDAG-NEXT: global_store_b128 v56, v[20:23], s[34:35] offset:96 -; GFX1250-SDAG-NEXT: global_store_b128 v56, v[28:31], s[34:35] offset:112 -; GFX1250-SDAG-NEXT: global_store_b128 v56, v[24:27], s[34:35] offset:64 -; GFX1250-SDAG-NEXT: global_store_b128 v56, v[16:19], s[34:35] 
offset:80 -; GFX1250-SDAG-NEXT: global_store_b128 v56, v[8:11], s[34:35] offset:32 -; GFX1250-SDAG-NEXT: global_store_b128 v56, v[4:7], s[34:35] offset:48 -; GFX1250-SDAG-NEXT: global_store_b128 v56, v[12:15], s[34:35] -; GFX1250-SDAG-NEXT: global_store_b128 v56, v[0:3], s[34:35] offset:16 +; GFX1250-SDAG-NEXT: global_store_b128 v56, v[20:23], s[0:1] offset:96 +; GFX1250-SDAG-NEXT: global_store_b128 v56, v[28:31], s[0:1] offset:112 +; GFX1250-SDAG-NEXT: global_store_b128 v56, v[24:27], s[0:1] offset:64 +; GFX1250-SDAG-NEXT: global_store_b128 v56, v[16:19], s[0:1] offset:80 +; GFX1250-SDAG-NEXT: global_store_b128 v56, v[8:11], s[0:1] offset:32 +; GFX1250-SDAG-NEXT: global_store_b128 v56, v[4:7], s[0:1] offset:48 +; GFX1250-SDAG-NEXT: global_store_b128 v56, v[12:15], s[0:1] +; GFX1250-SDAG-NEXT: global_store_b128 v56, v[0:3], s[0:1] offset:16 ; GFX1250-SDAG-NEXT: s_endpgm ; ; GFX1250-GISEL-LABEL: fadd_v32_vs: ; GFX1250-GISEL: ; %bb.0: -; GFX1250-GISEL-NEXT: s_load_b64 s[34:35], s[4:5], 0x24 +; GFX1250-GISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 ; GFX1250-GISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0 ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_lshlrev_b32_e32 v56, 7, v0 ; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0 ; GFX1250-GISEL-NEXT: s_clause 0x7 -; GFX1250-GISEL-NEXT: global_load_b128 v[0:3], v56, s[34:35] -; GFX1250-GISEL-NEXT: global_load_b128 v[4:7], v56, s[34:35] offset:16 -; GFX1250-GISEL-NEXT: global_load_b128 v[8:11], v56, s[34:35] offset:32 -; GFX1250-GISEL-NEXT: global_load_b128 v[12:15], v56, s[34:35] offset:48 -; GFX1250-GISEL-NEXT: global_load_b128 v[16:19], v56, s[34:35] offset:64 -; GFX1250-GISEL-NEXT: global_load_b128 v[20:23], v56, s[34:35] offset:80 -; GFX1250-GISEL-NEXT: global_load_b128 v[24:27], v56, s[34:35] offset:96 -; GFX1250-GISEL-NEXT: global_load_b128 v[28:31], v56, s[34:35] offset:112 -; GFX1250-GISEL-NEXT: s_load_b512 s[16:31], s[4:5], 0xa4 -; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0 -; GFX1250-GISEL-NEXT: s_load_b512 s[0:15], s[4:5], 0xe4 +; GFX1250-GISEL-NEXT: global_load_b128 v[0:3], v56, s[0:1] +; GFX1250-GISEL-NEXT: global_load_b128 v[4:7], v56, s[0:1] offset:16 +; GFX1250-GISEL-NEXT: global_load_b128 v[8:11], v56, s[0:1] offset:32 +; GFX1250-GISEL-NEXT: global_load_b128 v[12:15], v56, s[0:1] offset:48 +; GFX1250-GISEL-NEXT: global_load_b128 v[16:19], v56, s[0:1] offset:64 +; GFX1250-GISEL-NEXT: global_load_b128 v[20:23], v56, s[0:1] offset:80 +; GFX1250-GISEL-NEXT: global_load_b128 v[24:27], v56, s[0:1] offset:96 +; GFX1250-GISEL-NEXT: global_load_b128 v[28:31], v56, s[0:1] offset:112 +; GFX1250-GISEL-NEXT: s_clause 0x1 +; GFX1250-GISEL-NEXT: s_load_b512 s[36:51], s[4:5], 0xa4 +; GFX1250-GISEL-NEXT: s_load_b512 s[8:23], s[4:5], 0xe4 ; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0 -; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[32:33], s[16:17] -; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[34:35], s[18:19] -; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[36:37], s[20:21] -; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[38:39], s[22:23] -; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[40:41], s[24:25] -; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[42:43], s[26:27] -; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[44:45], s[28:29] -; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[46:47], s[30:31] -; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[48:49], s[0:1] -; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[50:51], s[2:3] -; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[52:53], s[4:5] -; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[54:55], s[6:7] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[32:33], s[36:37] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[34:35], s[38:39] +; 
GFX1250-GISEL-NEXT: v_mov_b64_e32 v[36:37], s[40:41] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[38:39], s[42:43] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[40:41], s[44:45] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[42:43], s[46:47] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[44:45], s[48:49] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[46:47], s[50:51] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[48:49], s[8:9] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[50:51], s[10:11] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[52:53], s[12:13] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[54:55], s[14:15] ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x7 ; GFX1250-GISEL-NEXT: v_pk_add_f32 v[0:1], v[0:1], v[32:33] ; GFX1250-GISEL-NEXT: v_pk_add_f32 v[2:3], v[2:3], v[34:35] -; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[32:33], s[8:9] -; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[34:35], s[10:11] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[32:33], s[16:17] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[34:35], s[18:19] ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x6 ; GFX1250-GISEL-NEXT: v_pk_add_f32 v[4:5], v[4:5], v[36:37] ; GFX1250-GISEL-NEXT: v_pk_add_f32 v[6:7], v[6:7], v[38:39] -; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[36:37], s[12:13] -; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[38:39], s[14:15] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[36:37], s[20:21] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[38:39], s[22:23] ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x5 ; GFX1250-GISEL-NEXT: v_pk_add_f32 v[8:9], v[8:9], v[40:41] ; GFX1250-GISEL-NEXT: v_pk_add_f32 v[10:11], v[10:11], v[42:43] @@ -466,14 +466,14 @@ define amdgpu_kernel void @fadd_v32_vs(ptr addrspace(1) %a, <32 x float> %x) { ; GFX1250-GISEL-NEXT: v_pk_add_f32 v[28:29], v[28:29], v[36:37] ; GFX1250-GISEL-NEXT: v_pk_add_f32 v[30:31], v[30:31], v[38:39] ; GFX1250-GISEL-NEXT: s_clause 0x7 -; GFX1250-GISEL-NEXT: global_store_b128 v56, v[0:3], s[34:35] -; GFX1250-GISEL-NEXT: global_store_b128 v56, v[4:7], s[34:35] offset:16 -; GFX1250-GISEL-NEXT: global_store_b128 v56, v[8:11], s[34:35] offset:32 -; GFX1250-GISEL-NEXT: global_store_b128 v56, v[12:15], s[34:35] offset:48 -; GFX1250-GISEL-NEXT: global_store_b128 v56, v[16:19], s[34:35] offset:64 -; GFX1250-GISEL-NEXT: global_store_b128 v56, v[20:23], s[34:35] offset:80 -; GFX1250-GISEL-NEXT: global_store_b128 v56, v[24:27], s[34:35] offset:96 -; GFX1250-GISEL-NEXT: global_store_b128 v56, v[28:31], s[34:35] offset:112 +; GFX1250-GISEL-NEXT: global_store_b128 v56, v[0:3], s[0:1] +; GFX1250-GISEL-NEXT: global_store_b128 v56, v[4:7], s[0:1] offset:16 +; GFX1250-GISEL-NEXT: global_store_b128 v56, v[8:11], s[0:1] offset:32 +; GFX1250-GISEL-NEXT: global_store_b128 v56, v[12:15], s[0:1] offset:48 +; GFX1250-GISEL-NEXT: global_store_b128 v56, v[16:19], s[0:1] offset:64 +; GFX1250-GISEL-NEXT: global_store_b128 v56, v[20:23], s[0:1] offset:80 +; GFX1250-GISEL-NEXT: global_store_b128 v56, v[24:27], s[0:1] offset:96 +; GFX1250-GISEL-NEXT: global_store_b128 v56, v[28:31], s[0:1] offset:112 ; GFX1250-GISEL-NEXT: s_endpgm %id = tail call i32 @llvm.amdgcn.workitem.id.x() %gep = getelementptr inbounds <32 x float>, ptr addrspace(1) %a, i32 %id @@ -1597,46 +1597,46 @@ define amdgpu_kernel void @fmul_v32_vs(ptr addrspace(1) %a, <32 x float> %x) { ; ; GFX1250-SDAG-LABEL: fmul_v32_vs: ; GFX1250-SDAG: ; %bb.0: -; GFX1250-SDAG-NEXT: s_load_b64 s[34:35], s[4:5], 0x24 +; GFX1250-SDAG-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 ; GFX1250-SDAG-NEXT: v_and_b32_e32 v0, 0x3ff, v0 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_lshlrev_b32_e32 v56, 7, v0 ; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0 ; GFX1250-SDAG-NEXT: 
s_clause 0x7 -; GFX1250-SDAG-NEXT: global_load_b128 v[0:3], v56, s[34:35] offset:16 -; GFX1250-SDAG-NEXT: global_load_b128 v[4:7], v56, s[34:35] offset:48 -; GFX1250-SDAG-NEXT: global_load_b128 v[8:11], v56, s[34:35] offset:32 -; GFX1250-SDAG-NEXT: global_load_b128 v[12:15], v56, s[34:35] -; GFX1250-SDAG-NEXT: global_load_b128 v[16:19], v56, s[34:35] offset:80 -; GFX1250-SDAG-NEXT: global_load_b128 v[20:23], v56, s[34:35] offset:96 -; GFX1250-SDAG-NEXT: global_load_b128 v[24:27], v56, s[34:35] offset:64 -; GFX1250-SDAG-NEXT: global_load_b128 v[28:31], v56, s[34:35] offset:112 -; GFX1250-SDAG-NEXT: s_load_b512 s[16:31], s[4:5], 0xa4 -; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0 -; GFX1250-SDAG-NEXT: s_load_b512 s[0:15], s[4:5], 0xe4 +; GFX1250-SDAG-NEXT: global_load_b128 v[0:3], v56, s[0:1] offset:16 +; GFX1250-SDAG-NEXT: global_load_b128 v[4:7], v56, s[0:1] offset:48 +; GFX1250-SDAG-NEXT: global_load_b128 v[8:11], v56, s[0:1] offset:32 +; GFX1250-SDAG-NEXT: global_load_b128 v[12:15], v56, s[0:1] +; GFX1250-SDAG-NEXT: global_load_b128 v[16:19], v56, s[0:1] offset:80 +; GFX1250-SDAG-NEXT: global_load_b128 v[20:23], v56, s[0:1] offset:96 +; GFX1250-SDAG-NEXT: global_load_b128 v[24:27], v56, s[0:1] offset:64 +; GFX1250-SDAG-NEXT: global_load_b128 v[28:31], v56, s[0:1] offset:112 +; GFX1250-SDAG-NEXT: s_clause 0x1 +; GFX1250-SDAG-NEXT: s_load_b512 s[36:51], s[4:5], 0xa4 +; GFX1250-SDAG-NEXT: s_load_b512 s[8:23], s[4:5], 0xe4 ; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0 -; GFX1250-SDAG-NEXT: v_dual_mov_b32 v32, s20 :: v_dual_mov_b32 v33, s21 -; GFX1250-SDAG-NEXT: v_dual_mov_b32 v34, s22 :: v_dual_mov_b32 v35, s23 -; GFX1250-SDAG-NEXT: v_dual_mov_b32 v36, s18 :: v_dual_mov_b32 v39, s29 -; GFX1250-SDAG-NEXT: v_dual_mov_b32 v40, s30 :: v_dual_mov_b32 v41, s31 -; GFX1250-SDAG-NEXT: v_dual_mov_b32 v42, s24 :: v_dual_mov_b32 v37, s19 -; GFX1250-SDAG-NEXT: v_dual_mov_b32 v38, s28 :: v_dual_mov_b32 v55, s15 -; GFX1250-SDAG-NEXT: v_dual_mov_b32 v51, s3 :: v_dual_mov_b32 v52, s12 -; GFX1250-SDAG-NEXT: v_dual_mov_b32 v53, s13 :: v_dual_mov_b32 v54, s14 -; GFX1250-SDAG-NEXT: v_dual_mov_b32 v49, s7 :: v_dual_mov_b32 v50, s2 -; GFX1250-SDAG-NEXT: v_dual_mov_b32 v45, s27 :: v_dual_mov_b32 v46, s4 -; GFX1250-SDAG-NEXT: v_dual_mov_b32 v47, s5 :: v_dual_mov_b32 v48, s6 -; GFX1250-SDAG-NEXT: v_dual_mov_b32 v43, s25 :: v_dual_mov_b32 v44, s26 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v32, s40 :: v_dual_mov_b32 v33, s41 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v34, s42 :: v_dual_mov_b32 v35, s43 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v36, s38 :: v_dual_mov_b32 v39, s49 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v40, s50 :: v_dual_mov_b32 v41, s51 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v42, s44 :: v_dual_mov_b32 v37, s39 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v38, s48 :: v_dual_mov_b32 v55, s23 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v51, s11 :: v_dual_mov_b32 v52, s20 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v53, s21 :: v_dual_mov_b32 v54, s22 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v49, s15 :: v_dual_mov_b32 v50, s10 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v45, s47 :: v_dual_mov_b32 v46, s12 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v47, s13 :: v_dual_mov_b32 v48, s14 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v43, s45 :: v_dual_mov_b32 v44, s46 ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x7 ; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[0:1], v[0:1], v[32:33] ; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[2:3], v[2:3], v[34:35] -; GFX1250-SDAG-NEXT: v_dual_mov_b32 v32, s8 :: v_dual_mov_b32 v33, s9 -; GFX1250-SDAG-NEXT: v_dual_mov_b32 v34, s10 :: v_dual_mov_b32 v35, s11 +; GFX1250-SDAG-NEXT: 
v_dual_mov_b32 v32, s16 :: v_dual_mov_b32 v33, s17 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v34, s18 :: v_dual_mov_b32 v35, s19 ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x6 ; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[6:7], v[6:7], v[40:41] -; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[40:41], s[0:1] +; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[40:41], s[8:9] ; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[4:5], v[4:5], v[38:39] -; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[38:39], s[16:17] +; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[38:39], s[36:37] ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x2 ; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[20:21], v[20:21], v[32:33] ; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[22:23], v[22:23], v[34:35] @@ -1652,58 +1652,58 @@ define amdgpu_kernel void @fmul_v32_vs(ptr addrspace(1) %a, <32 x float> %x) { ; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[14:15], v[14:15], v[36:37] ; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[12:13], v[12:13], v[38:39] ; GFX1250-SDAG-NEXT: s_clause 0x7 -; GFX1250-SDAG-NEXT: global_store_b128 v56, v[20:23], s[34:35] offset:96 -; GFX1250-SDAG-NEXT: global_store_b128 v56, v[28:31], s[34:35] offset:112 -; GFX1250-SDAG-NEXT: global_store_b128 v56, v[24:27], s[34:35] offset:64 -; GFX1250-SDAG-NEXT: global_store_b128 v56, v[16:19], s[34:35] offset:80 -; GFX1250-SDAG-NEXT: global_store_b128 v56, v[8:11], s[34:35] offset:32 -; GFX1250-SDAG-NEXT: global_store_b128 v56, v[4:7], s[34:35] offset:48 -; GFX1250-SDAG-NEXT: global_store_b128 v56, v[12:15], s[34:35] -; GFX1250-SDAG-NEXT: global_store_b128 v56, v[0:3], s[34:35] offset:16 +; GFX1250-SDAG-NEXT: global_store_b128 v56, v[20:23], s[0:1] offset:96 +; GFX1250-SDAG-NEXT: global_store_b128 v56, v[28:31], s[0:1] offset:112 +; GFX1250-SDAG-NEXT: global_store_b128 v56, v[24:27], s[0:1] offset:64 +; GFX1250-SDAG-NEXT: global_store_b128 v56, v[16:19], s[0:1] offset:80 +; GFX1250-SDAG-NEXT: global_store_b128 v56, v[8:11], s[0:1] offset:32 +; GFX1250-SDAG-NEXT: global_store_b128 v56, v[4:7], s[0:1] offset:48 +; GFX1250-SDAG-NEXT: global_store_b128 v56, v[12:15], s[0:1] +; GFX1250-SDAG-NEXT: global_store_b128 v56, v[0:3], s[0:1] offset:16 ; GFX1250-SDAG-NEXT: s_endpgm ; ; GFX1250-GISEL-LABEL: fmul_v32_vs: ; GFX1250-GISEL: ; %bb.0: -; GFX1250-GISEL-NEXT: s_load_b64 s[34:35], s[4:5], 0x24 +; GFX1250-GISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 ; GFX1250-GISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0 ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_lshlrev_b32_e32 v56, 7, v0 ; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0 ; GFX1250-GISEL-NEXT: s_clause 0x7 -; GFX1250-GISEL-NEXT: global_load_b128 v[0:3], v56, s[34:35] -; GFX1250-GISEL-NEXT: global_load_b128 v[4:7], v56, s[34:35] offset:16 -; GFX1250-GISEL-NEXT: global_load_b128 v[8:11], v56, s[34:35] offset:32 -; GFX1250-GISEL-NEXT: global_load_b128 v[12:15], v56, s[34:35] offset:48 -; GFX1250-GISEL-NEXT: global_load_b128 v[16:19], v56, s[34:35] offset:64 -; GFX1250-GISEL-NEXT: global_load_b128 v[20:23], v56, s[34:35] offset:80 -; GFX1250-GISEL-NEXT: global_load_b128 v[24:27], v56, s[34:35] offset:96 -; GFX1250-GISEL-NEXT: global_load_b128 v[28:31], v56, s[34:35] offset:112 -; GFX1250-GISEL-NEXT: s_load_b512 s[16:31], s[4:5], 0xa4 -; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0 -; GFX1250-GISEL-NEXT: s_load_b512 s[0:15], s[4:5], 0xe4 +; GFX1250-GISEL-NEXT: global_load_b128 v[0:3], v56, s[0:1] +; GFX1250-GISEL-NEXT: global_load_b128 v[4:7], v56, s[0:1] offset:16 +; GFX1250-GISEL-NEXT: global_load_b128 v[8:11], v56, s[0:1] offset:32 +; GFX1250-GISEL-NEXT: global_load_b128 v[12:15], v56, s[0:1] offset:48 +; GFX1250-GISEL-NEXT: global_load_b128 
v[16:19], v56, s[0:1] offset:64 +; GFX1250-GISEL-NEXT: global_load_b128 v[20:23], v56, s[0:1] offset:80 +; GFX1250-GISEL-NEXT: global_load_b128 v[24:27], v56, s[0:1] offset:96 +; GFX1250-GISEL-NEXT: global_load_b128 v[28:31], v56, s[0:1] offset:112 +; GFX1250-GISEL-NEXT: s_clause 0x1 +; GFX1250-GISEL-NEXT: s_load_b512 s[36:51], s[4:5], 0xa4 +; GFX1250-GISEL-NEXT: s_load_b512 s[8:23], s[4:5], 0xe4 ; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0 -; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[32:33], s[16:17] -; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[34:35], s[18:19] -; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[36:37], s[20:21] -; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[38:39], s[22:23] -; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[40:41], s[24:25] -; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[42:43], s[26:27] -; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[44:45], s[28:29] -; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[46:47], s[30:31] -; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[48:49], s[0:1] -; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[50:51], s[2:3] -; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[52:53], s[4:5] -; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[54:55], s[6:7] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[32:33], s[36:37] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[34:35], s[38:39] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[36:37], s[40:41] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[38:39], s[42:43] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[40:41], s[44:45] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[42:43], s[46:47] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[44:45], s[48:49] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[46:47], s[50:51] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[48:49], s[8:9] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[50:51], s[10:11] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[52:53], s[12:13] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[54:55], s[14:15] ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x7 ; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[0:1], v[0:1], v[32:33] ; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[2:3], v[2:3], v[34:35] -; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[32:33], s[8:9] -; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[34:35], s[10:11] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[32:33], s[16:17] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[34:35], s[18:19] ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x6 ; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[4:5], v[4:5], v[36:37] ; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[6:7], v[6:7], v[38:39] -; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[36:37], s[12:13] -; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[38:39], s[14:15] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[36:37], s[20:21] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[38:39], s[22:23] ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x5 ; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[8:9], v[8:9], v[40:41] ; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[10:11], v[10:11], v[42:43] @@ -1723,14 +1723,14 @@ define amdgpu_kernel void @fmul_v32_vs(ptr addrspace(1) %a, <32 x float> %x) { ; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[28:29], v[28:29], v[36:37] ; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[30:31], v[30:31], v[38:39] ; GFX1250-GISEL-NEXT: s_clause 0x7 -; GFX1250-GISEL-NEXT: global_store_b128 v56, v[0:3], s[34:35] -; GFX1250-GISEL-NEXT: global_store_b128 v56, v[4:7], s[34:35] offset:16 -; GFX1250-GISEL-NEXT: global_store_b128 v56, v[8:11], s[34:35] offset:32 -; GFX1250-GISEL-NEXT: global_store_b128 v56, v[12:15], s[34:35] offset:48 -; GFX1250-GISEL-NEXT: global_store_b128 v56, v[16:19], s[34:35] offset:64 -; GFX1250-GISEL-NEXT: global_store_b128 v56, v[20:23], s[34:35] offset:80 -; GFX1250-GISEL-NEXT: global_store_b128 v56, v[24:27], s[34:35] offset:96 -; GFX1250-GISEL-NEXT: global_store_b128 v56, v[28:31], s[34:35] 
offset:112 +; GFX1250-GISEL-NEXT: global_store_b128 v56, v[0:3], s[0:1] +; GFX1250-GISEL-NEXT: global_store_b128 v56, v[4:7], s[0:1] offset:16 +; GFX1250-GISEL-NEXT: global_store_b128 v56, v[8:11], s[0:1] offset:32 +; GFX1250-GISEL-NEXT: global_store_b128 v56, v[12:15], s[0:1] offset:48 +; GFX1250-GISEL-NEXT: global_store_b128 v56, v[16:19], s[0:1] offset:64 +; GFX1250-GISEL-NEXT: global_store_b128 v56, v[20:23], s[0:1] offset:80 +; GFX1250-GISEL-NEXT: global_store_b128 v56, v[24:27], s[0:1] offset:96 +; GFX1250-GISEL-NEXT: global_store_b128 v56, v[28:31], s[0:1] offset:112 ; GFX1250-GISEL-NEXT: s_endpgm %id = tail call i32 @llvm.amdgcn.workitem.id.x() %gep = getelementptr inbounds <32 x float>, ptr addrspace(1) %a, i32 %id @@ -2428,46 +2428,46 @@ define amdgpu_kernel void @fma_v32_vs(ptr addrspace(1) %a, <32 x float> %x) { ; ; GFX1250-SDAG-LABEL: fma_v32_vs: ; GFX1250-SDAG: ; %bb.0: -; GFX1250-SDAG-NEXT: s_load_b64 s[34:35], s[4:5], 0x24 +; GFX1250-SDAG-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 ; GFX1250-SDAG-NEXT: v_and_b32_e32 v0, 0x3ff, v0 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_lshlrev_b32_e32 v56, 7, v0 ; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0 ; GFX1250-SDAG-NEXT: s_clause 0x7 -; GFX1250-SDAG-NEXT: global_load_b128 v[0:3], v56, s[34:35] offset:16 -; GFX1250-SDAG-NEXT: global_load_b128 v[4:7], v56, s[34:35] offset:48 -; GFX1250-SDAG-NEXT: global_load_b128 v[8:11], v56, s[34:35] offset:32 -; GFX1250-SDAG-NEXT: global_load_b128 v[12:15], v56, s[34:35] -; GFX1250-SDAG-NEXT: global_load_b128 v[16:19], v56, s[34:35] offset:80 -; GFX1250-SDAG-NEXT: global_load_b128 v[20:23], v56, s[34:35] offset:96 -; GFX1250-SDAG-NEXT: global_load_b128 v[24:27], v56, s[34:35] offset:64 -; GFX1250-SDAG-NEXT: global_load_b128 v[28:31], v56, s[34:35] offset:112 -; GFX1250-SDAG-NEXT: s_load_b512 s[16:31], s[4:5], 0xa4 -; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0 -; GFX1250-SDAG-NEXT: s_load_b512 s[0:15], s[4:5], 0xe4 +; GFX1250-SDAG-NEXT: global_load_b128 v[0:3], v56, s[0:1] offset:16 +; GFX1250-SDAG-NEXT: global_load_b128 v[4:7], v56, s[0:1] offset:48 +; GFX1250-SDAG-NEXT: global_load_b128 v[8:11], v56, s[0:1] offset:32 +; GFX1250-SDAG-NEXT: global_load_b128 v[12:15], v56, s[0:1] +; GFX1250-SDAG-NEXT: global_load_b128 v[16:19], v56, s[0:1] offset:80 +; GFX1250-SDAG-NEXT: global_load_b128 v[20:23], v56, s[0:1] offset:96 +; GFX1250-SDAG-NEXT: global_load_b128 v[24:27], v56, s[0:1] offset:64 +; GFX1250-SDAG-NEXT: global_load_b128 v[28:31], v56, s[0:1] offset:112 +; GFX1250-SDAG-NEXT: s_clause 0x1 +; GFX1250-SDAG-NEXT: s_load_b512 s[36:51], s[4:5], 0xa4 +; GFX1250-SDAG-NEXT: s_load_b512 s[8:23], s[4:5], 0xe4 ; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0 -; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[32:33], s[20:21] -; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[34:35], s[22:23] -; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[40:41], s[30:31] -; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[38:39], s[28:29] -; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[52:53], s[12:13] -; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[54:55], s[14:15] -; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[50:51], s[2:3] -; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[46:47], s[4:5] -; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[48:49], s[6:7] -; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[42:43], s[24:25] -; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[44:45], s[26:27] -; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[36:37], s[18:19] +; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[32:33], s[40:41] +; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[34:35], s[42:43] +; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[40:41], s[50:51] +; GFX1250-SDAG-NEXT: v_mov_b64_e32 
v[38:39], s[48:49] +; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[52:53], s[20:21] +; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[54:55], s[22:23] +; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[50:51], s[10:11] +; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[46:47], s[12:13] +; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[48:49], s[14:15] +; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[42:43], s[44:45] +; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[44:45], s[46:47] +; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[36:37], s[38:39] ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x7 ; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[0:1], v[0:1], v[32:33], v[32:33] ; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[2:3], v[2:3], v[34:35], v[34:35] -; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[32:33], s[8:9] -; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[34:35], s[10:11] +; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[32:33], s[16:17] +; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[34:35], s[18:19] ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x6 ; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[6:7], v[6:7], v[40:41], v[40:41] -; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[40:41], s[0:1] +; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[40:41], s[8:9] ; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[4:5], v[4:5], v[38:39], v[38:39] -; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[38:39], s[16:17] +; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[38:39], s[36:37] ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 ; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[28:29], v[28:29], v[52:53], v[52:53] ; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[20:21], v[20:21], v[32:33], v[32:33] @@ -2482,58 +2482,58 @@ define amdgpu_kernel void @fma_v32_vs(ptr addrspace(1) %a, <32 x float> %x) { ; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[14:15], v[14:15], v[36:37], v[36:37] ; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[12:13], v[12:13], v[38:39], v[38:39] ; GFX1250-SDAG-NEXT: s_clause 0x7 -; GFX1250-SDAG-NEXT: global_store_b128 v56, v[20:23], s[34:35] offset:96 -; GFX1250-SDAG-NEXT: global_store_b128 v56, v[28:31], s[34:35] offset:112 -; GFX1250-SDAG-NEXT: global_store_b128 v56, v[24:27], s[34:35] offset:64 -; GFX1250-SDAG-NEXT: global_store_b128 v56, v[16:19], s[34:35] offset:80 -; GFX1250-SDAG-NEXT: global_store_b128 v56, v[8:11], s[34:35] offset:32 -; GFX1250-SDAG-NEXT: global_store_b128 v56, v[4:7], s[34:35] offset:48 -; GFX1250-SDAG-NEXT: global_store_b128 v56, v[12:15], s[34:35] -; GFX1250-SDAG-NEXT: global_store_b128 v56, v[0:3], s[34:35] offset:16 +; GFX1250-SDAG-NEXT: global_store_b128 v56, v[20:23], s[0:1] offset:96 +; GFX1250-SDAG-NEXT: global_store_b128 v56, v[28:31], s[0:1] offset:112 +; GFX1250-SDAG-NEXT: global_store_b128 v56, v[24:27], s[0:1] offset:64 +; GFX1250-SDAG-NEXT: global_store_b128 v56, v[16:19], s[0:1] offset:80 +; GFX1250-SDAG-NEXT: global_store_b128 v56, v[8:11], s[0:1] offset:32 +; GFX1250-SDAG-NEXT: global_store_b128 v56, v[4:7], s[0:1] offset:48 +; GFX1250-SDAG-NEXT: global_store_b128 v56, v[12:15], s[0:1] +; GFX1250-SDAG-NEXT: global_store_b128 v56, v[0:3], s[0:1] offset:16 ; GFX1250-SDAG-NEXT: s_endpgm ; ; GFX1250-GISEL-LABEL: fma_v32_vs: ; GFX1250-GISEL: ; %bb.0: -; GFX1250-GISEL-NEXT: s_load_b64 s[34:35], s[4:5], 0x24 +; GFX1250-GISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 ; GFX1250-GISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0 ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_lshlrev_b32_e32 v56, 7, v0 ; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0 ; GFX1250-GISEL-NEXT: s_clause 0x7 -; GFX1250-GISEL-NEXT: global_load_b128 v[0:3], v56, s[34:35] -; GFX1250-GISEL-NEXT: global_load_b128 v[4:7], v56, s[34:35] offset:16 -; GFX1250-GISEL-NEXT: global_load_b128 v[8:11], v56, s[34:35] offset:32 -; GFX1250-GISEL-NEXT: global_load_b128 v[12:15], v56, 
s[34:35] offset:48 -; GFX1250-GISEL-NEXT: global_load_b128 v[16:19], v56, s[34:35] offset:64 -; GFX1250-GISEL-NEXT: global_load_b128 v[20:23], v56, s[34:35] offset:80 -; GFX1250-GISEL-NEXT: global_load_b128 v[24:27], v56, s[34:35] offset:96 -; GFX1250-GISEL-NEXT: global_load_b128 v[28:31], v56, s[34:35] offset:112 -; GFX1250-GISEL-NEXT: s_load_b512 s[16:31], s[4:5], 0xa4 -; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0 -; GFX1250-GISEL-NEXT: s_load_b512 s[0:15], s[4:5], 0xe4 +; GFX1250-GISEL-NEXT: global_load_b128 v[0:3], v56, s[0:1] +; GFX1250-GISEL-NEXT: global_load_b128 v[4:7], v56, s[0:1] offset:16 +; GFX1250-GISEL-NEXT: global_load_b128 v[8:11], v56, s[0:1] offset:32 +; GFX1250-GISEL-NEXT: global_load_b128 v[12:15], v56, s[0:1] offset:48 +; GFX1250-GISEL-NEXT: global_load_b128 v[16:19], v56, s[0:1] offset:64 +; GFX1250-GISEL-NEXT: global_load_b128 v[20:23], v56, s[0:1] offset:80 +; GFX1250-GISEL-NEXT: global_load_b128 v[24:27], v56, s[0:1] offset:96 +; GFX1250-GISEL-NEXT: global_load_b128 v[28:31], v56, s[0:1] offset:112 +; GFX1250-GISEL-NEXT: s_clause 0x1 +; GFX1250-GISEL-NEXT: s_load_b512 s[36:51], s[4:5], 0xa4 +; GFX1250-GISEL-NEXT: s_load_b512 s[8:23], s[4:5], 0xe4 ; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0 -; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[32:33], s[16:17] -; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[34:35], s[18:19] -; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[36:37], s[20:21] -; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[38:39], s[22:23] -; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[40:41], s[24:25] -; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[42:43], s[26:27] -; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[44:45], s[28:29] -; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[46:47], s[30:31] -; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[48:49], s[0:1] -; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[50:51], s[2:3] -; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[52:53], s[4:5] -; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[54:55], s[6:7] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[32:33], s[36:37] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[34:35], s[38:39] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[36:37], s[40:41] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[38:39], s[42:43] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[40:41], s[44:45] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[42:43], s[46:47] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[44:45], s[48:49] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[46:47], s[50:51] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[48:49], s[8:9] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[50:51], s[10:11] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[52:53], s[12:13] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[54:55], s[14:15] ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x7 ; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[0:1], v[0:1], v[32:33], v[32:33] ; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[2:3], v[2:3], v[34:35], v[34:35] -; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[32:33], s[8:9] -; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[34:35], s[10:11] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[32:33], s[16:17] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[34:35], s[18:19] ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x6 ; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[4:5], v[4:5], v[36:37], v[36:37] ; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[6:7], v[6:7], v[38:39], v[38:39] -; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[36:37], s[12:13] -; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[38:39], s[14:15] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[36:37], s[20:21] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[38:39], s[22:23] ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x5 ; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[8:9], v[8:9], v[40:41], v[40:41] ; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[10:11], v[10:11], v[42:43], v[42:43] @@ 
-2553,14 +2553,14 @@ define amdgpu_kernel void @fma_v32_vs(ptr addrspace(1) %a, <32 x float> %x) { ; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[28:29], v[28:29], v[36:37], v[36:37] ; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[30:31], v[30:31], v[38:39], v[38:39] ; GFX1250-GISEL-NEXT: s_clause 0x7 -; GFX1250-GISEL-NEXT: global_store_b128 v56, v[0:3], s[34:35] -; GFX1250-GISEL-NEXT: global_store_b128 v56, v[4:7], s[34:35] offset:16 -; GFX1250-GISEL-NEXT: global_store_b128 v56, v[8:11], s[34:35] offset:32 -; GFX1250-GISEL-NEXT: global_store_b128 v56, v[12:15], s[34:35] offset:48 -; GFX1250-GISEL-NEXT: global_store_b128 v56, v[16:19], s[34:35] offset:64 -; GFX1250-GISEL-NEXT: global_store_b128 v56, v[20:23], s[34:35] offset:80 -; GFX1250-GISEL-NEXT: global_store_b128 v56, v[24:27], s[34:35] offset:96 -; GFX1250-GISEL-NEXT: global_store_b128 v56, v[28:31], s[34:35] offset:112 +; GFX1250-GISEL-NEXT: global_store_b128 v56, v[0:3], s[0:1] +; GFX1250-GISEL-NEXT: global_store_b128 v56, v[4:7], s[0:1] offset:16 +; GFX1250-GISEL-NEXT: global_store_b128 v56, v[8:11], s[0:1] offset:32 +; GFX1250-GISEL-NEXT: global_store_b128 v56, v[12:15], s[0:1] offset:48 +; GFX1250-GISEL-NEXT: global_store_b128 v56, v[16:19], s[0:1] offset:64 +; GFX1250-GISEL-NEXT: global_store_b128 v56, v[20:23], s[0:1] offset:80 +; GFX1250-GISEL-NEXT: global_store_b128 v56, v[24:27], s[0:1] offset:96 +; GFX1250-GISEL-NEXT: global_store_b128 v56, v[28:31], s[0:1] offset:112 ; GFX1250-GISEL-NEXT: s_endpgm %id = tail call i32 @llvm.amdgcn.workitem.id.x() %gep = getelementptr inbounds <32 x float>, ptr addrspace(1) %a, i32 %id @@ -3529,9 +3529,9 @@ define amdgpu_kernel void @fadd_fadd_fsub(<2 x float> %arg, <2 x float> %arg1, p ; ; GFX1250-SDAG-LABEL: fadd_fadd_fsub: ; GFX1250-SDAG: ; %bb.0: ; %bb +; GFX1250-SDAG-NEXT: s_clause 0x1 ; GFX1250-SDAG-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0 -; GFX1250-SDAG-NEXT: s_load_b64 s[4:5], s[4:5], 0x34 +; GFX1250-SDAG-NEXT: s_load_b64 s[6:7], s[4:5], 0x34 ; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0 ; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[0:1], s[2:3] ; GFX1250-SDAG-NEXT: s_add_f32 s2, s1, s3 @@ -3541,14 +3541,14 @@ define amdgpu_kernel void @fadd_fadd_fsub(<2 x float> %arg, <2 x float> %arg1, p ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v4, s0 :: v_dual_mov_b32 v5, v2 ; GFX1250-SDAG-NEXT: v_mov_b32_e32 v2, 0 ; GFX1250-SDAG-NEXT: v_pk_add_f32 v[0:1], v[4:5], v[0:1] neg_lo:[0,1] neg_hi:[0,1] -; GFX1250-SDAG-NEXT: global_store_b64 v2, v[0:1], s[4:5] +; GFX1250-SDAG-NEXT: global_store_b64 v2, v[0:1], s[6:7] ; GFX1250-SDAG-NEXT: s_endpgm ; ; GFX1250-GISEL-LABEL: fadd_fadd_fsub: ; GFX1250-GISEL: ; %bb.0: ; %bb +; GFX1250-GISEL-NEXT: s_clause 0x1 ; GFX1250-GISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0 -; GFX1250-GISEL-NEXT: s_load_b64 s[4:5], s[4:5], 0x34 +; GFX1250-GISEL-NEXT: s_load_b64 s[6:7], s[4:5], 0x34 ; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[0:1] ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3] @@ -3560,7 +3560,7 @@ define amdgpu_kernel void @fadd_fadd_fsub(<2 x float> %arg, <2 x float> %arg1, p ; GFX1250-GISEL-NEXT: v_pk_add_f32 v[0:1], v[2:3], v[0:1] ; GFX1250-GISEL-NEXT: v_dual_mov_b32 v2, s0 :: v_dual_subrev_f32 v3, s3, v0 ; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, 0 -; GFX1250-GISEL-NEXT: global_store_b64 v0, v[2:3], s[4:5] +; GFX1250-GISEL-NEXT: global_store_b64 v0, v[2:3], s[6:7] ; GFX1250-GISEL-NEXT: s_endpgm bb: %i12 = fadd <2 x float> %arg, %arg1 diff --git 
a/llvm/test/CodeGen/AMDGPU/preload-implicit-kernargs.ll b/llvm/test/CodeGen/AMDGPU/preload-implicit-kernargs.ll index b717f85..6671201 100644 --- a/llvm/test/CodeGen/AMDGPU/preload-implicit-kernargs.ll +++ b/llvm/test/CodeGen/AMDGPU/preload-implicit-kernargs.ll @@ -186,12 +186,12 @@ define amdgpu_kernel void @mixed_inreg_block_count_x(ptr addrspace(1) %out, i32 ; ; GFX1250-LABEL: mixed_inreg_block_count_x: ; GFX1250: ; %bb.0: -; GFX1250-NEXT: s_load_b32 s2, s[0:1], 0x10 -; GFX1250-NEXT: s_wait_xcnt 0x0 -; GFX1250-NEXT: s_load_b64 s[0:1], s[0:1], 0x0 +; GFX1250-NEXT: s_clause 0x1 +; GFX1250-NEXT: s_load_b32 s4, s[0:1], 0x10 +; GFX1250-NEXT: s_load_b64 s[2:3], s[0:1], 0x0 ; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2 -; GFX1250-NEXT: global_store_b32 v0, v1, s[0:1] +; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s4 +; GFX1250-NEXT: global_store_b32 v0, v1, s[2:3] ; GFX1250-NEXT: s_endpgm %imp_arg_ptr = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr() %load = load i32, ptr addrspace(4) %imp_arg_ptr diff --git a/llvm/test/CodeGen/AMDGPU/preload-kernargs.ll b/llvm/test/CodeGen/AMDGPU/preload-kernargs.ll index 4d367ef..c1764c9 100644 --- a/llvm/test/CodeGen/AMDGPU/preload-kernargs.ll +++ b/llvm/test/CodeGen/AMDGPU/preload-kernargs.ll @@ -346,10 +346,10 @@ define amdgpu_kernel void @byref_preload_arg(ptr addrspace(1) inreg %out, ptr ad ; ; GFX1250-LABEL: byref_preload_arg: ; GFX1250: ; %bb.0: -; GFX1250-NEXT: s_load_b64 s[0:1], s[0:1], 0x100 +; GFX1250-NEXT: s_load_b64 s[4:5], s[0:1], 0x100 ; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s0 -; GFX1250-NEXT: v_mov_b32_e32 v2, s1 +; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s4 +; GFX1250-NEXT: v_mov_b32_e32 v2, s5 ; GFX1250-NEXT: global_store_b32 v0, v1, s[2:3] scope:SCOPE_SYS ; GFX1250-NEXT: s_wait_storecnt 0x0 ; GFX1250-NEXT: global_store_b32 v0, v2, s[2:3] scope:SCOPE_SYS @@ -404,10 +404,10 @@ define amdgpu_kernel void @byref_staggered_preload_arg(ptr addrspace(1) inreg %o ; ; GFX1250-LABEL: byref_staggered_preload_arg: ; GFX1250: ; %bb.0: -; GFX1250-NEXT: s_load_b64 s[0:1], s[0:1], 0x100 +; GFX1250-NEXT: s_load_b64 s[4:5], s[0:1], 0x100 ; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s0 -; GFX1250-NEXT: v_mov_b32_e32 v2, s1 +; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s4 +; GFX1250-NEXT: v_mov_b32_e32 v2, s5 ; GFX1250-NEXT: global_store_b32 v0, v1, s[2:3] scope:SCOPE_SYS ; GFX1250-NEXT: s_wait_storecnt 0x0 ; GFX1250-NEXT: global_store_b32 v0, v2, s[2:3] scope:SCOPE_SYS diff --git a/llvm/test/CodeGen/AMDGPU/regalloc-failure-overlapping-insert-assert.mir b/llvm/test/CodeGen/AMDGPU/regalloc-failure-overlapping-insert-assert.mir index b32e997..80afe7a 100644 --- a/llvm/test/CodeGen/AMDGPU/regalloc-failure-overlapping-insert-assert.mir +++ b/llvm/test/CodeGen/AMDGPU/regalloc-failure-overlapping-insert-assert.mir @@ -43,17 +43,17 @@ machineFunctionInfo: body: | bb.0: - INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 10 /* regdef */, implicit-def $agpr0 + INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 2424842 /* regdef:AGPR_32 */, implicit-def $agpr0 %14:vgpr_32 = COPY killed $agpr0 - INLINEASM &"; def $0 $1 $2 $3 $4", 1 /* sideeffect attdialect */, 27394058 /* regdef:VReg_512 */, def %7, 13697034 /* regdef:VReg_256 */, def %8, 6225930 /* regdef:VReg_128 */, def %9, 4915210 /* regdef:VReg_96 */, def %10, 4915210 /* regdef:VReg_96 */, def %11 + INLINEASM 
&"; def $0 $1 $2 $3 $4", 1 /* sideeffect attdialect */, 40042506 /* regdef:VReg_512 */, def %7, 19464202 /* regdef:VReg_256 */, def %8, 7929866 /* regdef:VReg_128 */, def %9, 5963786 /* regdef:VReg_96 */, def %10, 5963786 /* regdef:VReg_96 */, def %11 INLINEASM &"; clobber", 1 /* sideeffect attdialect */, 12 /* clobber */, implicit-def dead early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15_agpr16_agpr17_agpr18_agpr19_agpr20_agpr21_agpr22_agpr23_agpr24_agpr25_agpr26_agpr27_agpr28_agpr29_agpr30_agpr31, 12 /* clobber */, implicit-def dead early-clobber $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 - INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 27394057 /* reguse:VReg_512 */, %7 - INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 13697033 /* reguse:VReg_256 */, %8 - INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 6225929 /* reguse:VReg_128 */, %9 - INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 4915209 /* reguse:VReg_96 */, %10 - INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 4915209 /* reguse:VReg_96 */, %11 + INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 40042505 /* reguse:VReg_512 */, %7 + INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 19464201 /* reguse:VReg_256 */, %8 + INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 7929865 /* reguse:VReg_128 */, %9 + INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 5963785 /* reguse:VReg_96 */, %10 + INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 5963785 /* reguse:VReg_96 */, %11 $agpr1 = COPY %14 - INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 9 /* reguse */, killed $agpr1 + INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 2424841 /* reguse:AGPR_32 */, killed $agpr1 SI_RETURN ... 
diff --git a/llvm/test/CodeGen/AMDGPU/rewrite-vgpr-mfma-to-agpr-copy-from.mir b/llvm/test/CodeGen/AMDGPU/rewrite-vgpr-mfma-to-agpr-copy-from.mir index 1b09f5d..ad490f8 100644 --- a/llvm/test/CodeGen/AMDGPU/rewrite-vgpr-mfma-to-agpr-copy-from.mir +++ b/llvm/test/CodeGen/AMDGPU/rewrite-vgpr-mfma-to-agpr-copy-from.mir @@ -41,9 +41,9 @@ body: | ; CHECK-NEXT: [[COPY1:%[0-9]+]]:av_64_align2 = COPY $vgpr0_vgpr1 ; CHECK-NEXT: [[COPY2:%[0-9]+]]:av_64_align2 = COPY $vgpr2_vgpr3 ; CHECK-NEXT: [[GLOBAL_LOAD_DWORDX4_:%[0-9]+]]:areg_128_align2 = GLOBAL_LOAD_DWORDX4 [[COPY]], 0, 0, implicit $exec :: (load (s128), addrspace 1) - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:areg_128_align2 = COPY [[GLOBAL_LOAD_DWORDX4_]] - ; CHECK-NEXT: [[V_MFMA_F64_4X4X4F64_e64_:%[0-9]+]]:areg_64_align2 = V_MFMA_F64_4X4X4F64_e64 [[COPY1]], [[COPY2]], [[COPY3]].sub0_sub1, 0, 0, 0, implicit $mode, implicit $exec - ; CHECK-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 3801097 /* reguse:AV_64_Align2 */, [[V_MFMA_F64_4X4X4F64_e64_]] + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vreg_128_align2 = COPY [[GLOBAL_LOAD_DWORDX4_]] + ; CHECK-NEXT: [[V_MFMA_F64_4X4X4F64_vgprcd_e64_:%[0-9]+]]:vreg_64_align2 = V_MFMA_F64_4X4X4F64_vgprcd_e64 [[COPY1]], [[COPY2]], [[COPY3]].sub0_sub1, 0, 0, 0, implicit $mode, implicit $exec + ; CHECK-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 3997705 /* reguse:VReg_64_Align2 */, [[V_MFMA_F64_4X4X4F64_vgprcd_e64_]] ; CHECK-NEXT: SI_RETURN %0:vreg_64_align2 = COPY $vgpr4_vgpr5 %1:av_64_align2 = COPY $vgpr0_vgpr1 @@ -51,7 +51,7 @@ body: | %3:areg_128_align2 = GLOBAL_LOAD_DWORDX4 %0, 0, 0, implicit $exec :: (load (s128), addrspace 1) %4:vreg_128_align2 = COPY %3 %5:vreg_64_align2 = V_MFMA_F64_4X4X4F64_vgprcd_e64 %1, %2, %4.sub0_sub1, 0, 0, 0, implicit $mode, implicit $exec - INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 3801097 /* reguse:VReg_64_Align2 */, %5 + INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 3997705 /* reguse:VReg_64_Align2 */, %5 SI_RETURN ... 
diff --git a/llvm/test/CodeGen/AMDGPU/rewrite-vgpr-mfma-to-agpr-subreg-src2-chain.mir b/llvm/test/CodeGen/AMDGPU/rewrite-vgpr-mfma-to-agpr-subreg-src2-chain.mir index 4c2ea2f..dcf3b8b 100644 --- a/llvm/test/CodeGen/AMDGPU/rewrite-vgpr-mfma-to-agpr-subreg-src2-chain.mir +++ b/llvm/test/CodeGen/AMDGPU/rewrite-vgpr-mfma-to-agpr-subreg-src2-chain.mir @@ -79,7 +79,7 @@ body: | ; CHECK-NEXT: dead %other_use:vreg_64_align2 = COPY [[V_MFMA_F64_4X4X4F64_e64_]].sub0_sub1 ; CHECK-NEXT: [[V_MFMA_F64_4X4X4F64_e64_1:%[0-9]+]]:areg_64_align2 = V_MFMA_F64_4X4X4F64_e64 [[COPY1]], [[COPY2]], [[V_MFMA_F64_4X4X4F64_e64_]].sub0_sub1, 0, 0, 0, implicit $mode, implicit $exec ; CHECK-NEXT: [[COPY3:%[0-9]+]]:areg_64_align2 = COPY [[V_MFMA_F64_4X4X4F64_e64_1]] - ; CHECK-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 4521993 /* reguse:VS_64_with_sub0_in_VS_32_Lo128 */, [[COPY3]] + ; CHECK-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 4915209 /* reguse:AReg_64_Align2 */, [[COPY3]] ; CHECK-NEXT: GLOBAL_STORE_DWORDX2 [[COPY]], [[COPY3]], 0, 0, implicit $exec :: (store (s64), addrspace 1) ; CHECK-NEXT: SI_RETURN %0:vreg_64_align2 = COPY $vgpr4_vgpr5 @@ -90,7 +90,7 @@ body: | %other_use:vreg_64_align2 = COPY %4.sub0_sub1 %5:vreg_64_align2 = V_MFMA_F64_4X4X4F64_vgprcd_e64 %1, %2, %4.sub0_sub1, 0, 0, 0, implicit $mode, implicit $exec %6:areg_64_align2 = COPY %5 - INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 4521993 /* reguse:AReg_64_Align2 */, %6:areg_64_align2 + INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 4915209 /* reguse:AReg_64_Align2 */, %6:areg_64_align2 GLOBAL_STORE_DWORDX2 %0, %6, 0, 0, implicit $exec :: (store (s64), addrspace 1) SI_RETURN ... @@ -114,7 +114,7 @@ body: | ; CHECK-NEXT: undef [[V_MFMA_F64_4X4X4F64_e64_1:%[0-9]+]].sub0_sub1:areg_128_align2 = V_MFMA_F64_4X4X4F64_e64 [[COPY1]], [[COPY2]], [[V_MFMA_F64_4X4X4F64_e64_]], 0, 0, 0, implicit $mode, implicit $exec ; CHECK-NEXT: [[V_MFMA_F64_4X4X4F64_e64_2:%[0-9]+]]:areg_64_align2 = V_MFMA_F64_4X4X4F64_e64 [[COPY1]], [[COPY2]], [[V_MFMA_F64_4X4X4F64_e64_1]].sub0_sub1, 0, 0, 0, implicit $mode, implicit $exec ; CHECK-NEXT: [[COPY3:%[0-9]+]]:areg_64_align2 = COPY [[V_MFMA_F64_4X4X4F64_e64_2]] - ; CHECK-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 4521993 /* reguse:VS_64_with_sub0_in_VS_32_Lo128 */, [[COPY3]] + ; CHECK-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 4915209 /* reguse:AReg_64_Align2 */, [[COPY3]] ; CHECK-NEXT: GLOBAL_STORE_DWORDX2 [[COPY]], [[COPY3]], 0, 0, implicit $exec :: (store (s64), addrspace 1) ; CHECK-NEXT: SI_RETURN %0:vreg_64_align2 = COPY $vgpr4_vgpr5 @@ -126,7 +126,7 @@ body: | undef %5.sub0_sub1:vreg_128_align2 = V_MFMA_F64_4X4X4F64_vgprcd_e64 %1, %2, %4, 0, 0, 0, implicit $mode, implicit $exec %6:vreg_64_align2 = V_MFMA_F64_4X4X4F64_vgprcd_e64 %1, %2, %5.sub0_sub1, 0, 0, 0, implicit $mode, implicit $exec %7:areg_64_align2 = COPY %6 - INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 4521993 /* reguse:AReg_64_Align2 */, %7 + INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 4915209 /* reguse:AReg_64_Align2 */, %7 GLOBAL_STORE_DWORDX2 %0, %7, 0, 0, implicit $exec :: (store (s64), addrspace 1) SI_RETURN diff --git a/llvm/test/CodeGen/AMDGPU/scale-offset-flat.ll b/llvm/test/CodeGen/AMDGPU/scale-offset-flat.ll index 335d58c..a18847b 100644 --- a/llvm/test/CodeGen/AMDGPU/scale-offset-flat.ll +++ b/llvm/test/CodeGen/AMDGPU/scale-offset-flat.ll @@ -324,11 +324,9 @@ define amdgpu_ps <2 x float> @flat_atomicrmw_b64_rtn_idxprom(ptr align 8 inreg % ; SDAG-LABEL: 
flat_atomicrmw_b64_rtn_idxprom: ; SDAG: ; %bb.0: ; %entry ; SDAG-NEXT: v_ashrrev_i32_e32 v1, 31, v0 -; SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; SDAG-NEXT: v_lshl_add_u64 v[2:3], v[0:1], 3, s[0:1] -; SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi -; SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) -; SDAG-NEXT: v_xor_b32_e32 v0, s0, v3 +; SDAG-NEXT: v_xor_b32_e32 v0, src_flat_scratch_base_hi, v3 ; SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) ; SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0 ; SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1 @@ -350,10 +348,9 @@ define amdgpu_ps <2 x float> @flat_atomicrmw_b64_rtn_idxprom(ptr align 8 inreg % ; SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; SDAG-NEXT: s_cbranch_execz .LBB21_2 ; SDAG-NEXT: .LBB21_4: ; %atomicrmw.private -; SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3] ; SDAG-NEXT: s_wait_loadcnt_dscnt 0x0 -; SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v2 +; SDAG-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v2 ; SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) ; SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo ; SDAG-NEXT: scratch_load_b64 v[0:1], v4, off @@ -367,12 +364,12 @@ define amdgpu_ps <2 x float> @flat_atomicrmw_b64_rtn_idxprom(ptr align 8 inreg % ; ; GISEL-LABEL: flat_atomicrmw_b64_rtn_idxprom: ; GISEL: ; %bb.0: ; %entry -; GISEL-NEXT: v_dual_mov_b32 v2, v0 :: v_dual_mov_b32 v0, src_flat_scratch_base_hi +; GISEL-NEXT: v_mov_b32_e32 v2, v0 ; GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GISEL-NEXT: v_ashrrev_i32_e32 v3, 31, v2 ; GISEL-NEXT: v_lshl_add_u64 v[4:5], v[2:3], 3, s[0:1] ; GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GISEL-NEXT: v_xor_b32_e32 v0, v5, v0 +; GISEL-NEXT: v_xor_b32_e32 v0, src_flat_scratch_base_hi, v5 ; GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0 ; GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1 ; GISEL-NEXT: s_and_saveexec_b32 s2, vcc_lo @@ -394,11 +391,10 @@ define amdgpu_ps <2 x float> @flat_atomicrmw_b64_rtn_idxprom(ptr align 8 inreg % ; GISEL-NEXT: s_and_not1_saveexec_b32 s0, s2 ; GISEL-NEXT: s_cbranch_execz .LBB21_2 ; GISEL-NEXT: .LBB21_4: ; %atomicrmw.private -; GISEL-NEXT: s_wait_loadcnt_dscnt 0x0 -; GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo ; GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5] -; GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GISEL-NEXT: v_sub_nc_u32_e32 v0, v4, v0 +; GISEL-NEXT: s_wait_loadcnt_dscnt 0x0 +; GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v4 +; GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GISEL-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo ; GISEL-NEXT: scratch_load_b64 v[0:1], v4, off ; GISEL-NEXT: s_wait_loadcnt 0x0 diff --git a/llvm/test/CodeGen/AMDGPU/scale-offset-smem.ll b/llvm/test/CodeGen/AMDGPU/scale-offset-smem.ll index b5bb68e..e0ea08d 100644 --- a/llvm/test/CodeGen/AMDGPU/scale-offset-smem.ll +++ b/llvm/test/CodeGen/AMDGPU/scale-offset-smem.ll @@ -97,9 +97,9 @@ entry: define amdgpu_ps <2 x float> @s_load_b64_idxprom(ptr addrspace(4) align 4 inreg %p, i32 inreg %idx) { ; GCN-LABEL: s_load_b64_idxprom: ; GCN: ; %bb.0: ; %entry -; GCN-NEXT: s_load_b64 s[0:1], s[0:1], s2 offset:0x0 scale_offset +; GCN-NEXT: s_load_b64 s[4:5], s[0:1], s2 offset:0x0 scale_offset ; GCN-NEXT: s_wait_kmcnt 0x0 -; GCN-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 
v1, s1 +; GCN-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5 ; GCN-NEXT: ; return to shader part epilog entry: %idxprom = zext i32 %idx to i64 @@ -111,10 +111,10 @@ entry: define amdgpu_ps <3 x float> @s_load_b96_idxprom(ptr addrspace(4) align 4 inreg %p, i32 inreg %idx) { ; GCN-LABEL: s_load_b96_idxprom: ; GCN: ; %bb.0: ; %entry -; GCN-NEXT: s_load_b96 s[0:2], s[0:1], s2 offset:0x0 scale_offset +; GCN-NEXT: s_load_b96 s[4:6], s[0:1], s2 offset:0x0 scale_offset ; GCN-NEXT: s_wait_kmcnt 0x0 -; GCN-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1 -; GCN-NEXT: v_mov_b32_e32 v2, s2 +; GCN-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5 +; GCN-NEXT: v_mov_b32_e32 v2, s6 ; GCN-NEXT: ; return to shader part epilog entry: %idxprom = zext i32 %idx to i64 @@ -126,10 +126,10 @@ entry: define amdgpu_ps <4 x float> @s_load_b128_idxprom(ptr addrspace(4) align 4 inreg %p, i32 inreg %idx) { ; GCN-LABEL: s_load_b128_idxprom: ; GCN: ; %bb.0: ; %entry -; GCN-NEXT: s_load_b128 s[0:3], s[0:1], s2 offset:0x0 scale_offset +; GCN-NEXT: s_load_b128 s[4:7], s[0:1], s2 offset:0x0 scale_offset ; GCN-NEXT: s_wait_kmcnt 0x0 -; GCN-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1 -; GCN-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3 +; GCN-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5 +; GCN-NEXT: v_dual_mov_b32 v2, s6 :: v_dual_mov_b32 v3, s7 ; GCN-NEXT: ; return to shader part epilog entry: %idxprom = zext i32 %idx to i64 @@ -141,12 +141,12 @@ entry: define amdgpu_ps <8 x float> @s_load_b256_idxprom(ptr addrspace(4) align 4 inreg %p, i32 inreg %idx) { ; GCN-LABEL: s_load_b256_idxprom: ; GCN: ; %bb.0: ; %entry -; GCN-NEXT: s_load_b256 s[0:7], s[0:1], s2 offset:0x0 scale_offset +; GCN-NEXT: s_load_b256 s[4:11], s[0:1], s2 offset:0x0 scale_offset ; GCN-NEXT: s_wait_kmcnt 0x0 -; GCN-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1 -; GCN-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3 -; GCN-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5 -; GCN-NEXT: v_dual_mov_b32 v6, s6 :: v_dual_mov_b32 v7, s7 +; GCN-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5 +; GCN-NEXT: v_dual_mov_b32 v2, s6 :: v_dual_mov_b32 v3, s7 +; GCN-NEXT: v_dual_mov_b32 v4, s8 :: v_dual_mov_b32 v5, s9 +; GCN-NEXT: v_dual_mov_b32 v6, s10 :: v_dual_mov_b32 v7, s11 ; GCN-NEXT: ; return to shader part epilog entry: %idxprom = zext i32 %idx to i64 @@ -158,16 +158,16 @@ entry: define amdgpu_ps <16 x float> @s_load_b512_idxprom(ptr addrspace(4) align 4 inreg %p, i32 inreg %idx) { ; GCN-LABEL: s_load_b512_idxprom: ; GCN: ; %bb.0: ; %entry -; GCN-NEXT: s_load_b512 s[0:15], s[0:1], s2 offset:0x0 scale_offset +; GCN-NEXT: s_load_b512 s[4:19], s[0:1], s2 offset:0x0 scale_offset ; GCN-NEXT: s_wait_kmcnt 0x0 -; GCN-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1 -; GCN-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3 -; GCN-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5 -; GCN-NEXT: v_dual_mov_b32 v6, s6 :: v_dual_mov_b32 v7, s7 -; GCN-NEXT: v_dual_mov_b32 v8, s8 :: v_dual_mov_b32 v9, s9 -; GCN-NEXT: v_dual_mov_b32 v10, s10 :: v_dual_mov_b32 v11, s11 -; GCN-NEXT: v_dual_mov_b32 v12, s12 :: v_dual_mov_b32 v13, s13 -; GCN-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s15 +; GCN-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5 +; GCN-NEXT: v_dual_mov_b32 v2, s6 :: v_dual_mov_b32 v3, s7 +; GCN-NEXT: v_dual_mov_b32 v4, s8 :: v_dual_mov_b32 v5, s9 +; GCN-NEXT: v_dual_mov_b32 v6, s10 :: v_dual_mov_b32 v7, s11 +; GCN-NEXT: v_dual_mov_b32 v8, s12 :: v_dual_mov_b32 v9, s13 +; GCN-NEXT: v_dual_mov_b32 
v10, s14 :: v_dual_mov_b32 v11, s15 +; GCN-NEXT: v_dual_mov_b32 v12, s16 :: v_dual_mov_b32 v13, s17 +; GCN-NEXT: v_dual_mov_b32 v14, s18 :: v_dual_mov_b32 v15, s19 ; GCN-NEXT: ; return to shader part epilog entry: %idxprom = zext i32 %idx to i64 @@ -275,11 +275,11 @@ entry: define amdgpu_ps <2 x float> @s_load_b64_idxprom_range(ptr addrspace(4) align 4 inreg %p) { ; GCN-LABEL: s_load_b64_idxprom_range: ; GCN: ; %bb.0: ; %entry -; GCN-NEXT: s_load_b32 s2, s[0:1], 0x0 +; GCN-NEXT: s_load_b32 s4, s[0:1], 0x0 ; GCN-NEXT: s_wait_kmcnt 0x0 -; GCN-NEXT: s_load_b64 s[0:1], s[0:1], s2 offset:0x0 scale_offset +; GCN-NEXT: s_load_b64 s[2:3], s[0:1], s4 offset:0x0 scale_offset ; GCN-NEXT: s_wait_kmcnt 0x0 -; GCN-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1 +; GCN-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3 ; GCN-NEXT: ; return to shader part epilog entry: %idx = load i32, ptr addrspace(4) %p, align 4, !range !0 @@ -294,10 +294,10 @@ define amdgpu_ps <3 x float> @s_load_b96_idxprom_range(ptr addrspace(4) align 4 ; GCN: ; %bb.0: ; %entry ; GCN-NEXT: s_load_b32 s2, s[0:1], 0x0 ; GCN-NEXT: s_wait_kmcnt 0x0 -; GCN-NEXT: s_load_b96 s[0:2], s[0:1], s2 offset:0x0 scale_offset +; GCN-NEXT: s_load_b96 s[4:6], s[0:1], s2 offset:0x0 scale_offset ; GCN-NEXT: s_wait_kmcnt 0x0 -; GCN-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1 -; GCN-NEXT: v_mov_b32_e32 v2, s2 +; GCN-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5 +; GCN-NEXT: v_mov_b32_e32 v2, s6 ; GCN-NEXT: ; return to shader part epilog entry: %idx = load i32, ptr addrspace(4) %p, align 4, !range !0 @@ -312,10 +312,10 @@ define amdgpu_ps <4 x float> @s_load_b128_idxprom_range(ptr addrspace(4) align 4 ; GCN: ; %bb.0: ; %entry ; GCN-NEXT: s_load_b32 s2, s[0:1], 0x0 ; GCN-NEXT: s_wait_kmcnt 0x0 -; GCN-NEXT: s_load_b128 s[0:3], s[0:1], s2 offset:0x0 scale_offset +; GCN-NEXT: s_load_b128 s[4:7], s[0:1], s2 offset:0x0 scale_offset ; GCN-NEXT: s_wait_kmcnt 0x0 -; GCN-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1 -; GCN-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3 +; GCN-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5 +; GCN-NEXT: v_dual_mov_b32 v2, s6 :: v_dual_mov_b32 v3, s7 ; GCN-NEXT: ; return to shader part epilog entry: %idx = load i32, ptr addrspace(4) %p, align 4, !range !0 @@ -330,12 +330,12 @@ define amdgpu_ps <8 x float> @s_load_b256_idxprom_range(ptr addrspace(4) align 4 ; GCN: ; %bb.0: ; %entry ; GCN-NEXT: s_load_b32 s2, s[0:1], 0x0 ; GCN-NEXT: s_wait_kmcnt 0x0 -; GCN-NEXT: s_load_b256 s[0:7], s[0:1], s2 offset:0x0 scale_offset +; GCN-NEXT: s_load_b256 s[4:11], s[0:1], s2 offset:0x0 scale_offset ; GCN-NEXT: s_wait_kmcnt 0x0 -; GCN-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1 -; GCN-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3 -; GCN-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5 -; GCN-NEXT: v_dual_mov_b32 v6, s6 :: v_dual_mov_b32 v7, s7 +; GCN-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5 +; GCN-NEXT: v_dual_mov_b32 v2, s6 :: v_dual_mov_b32 v3, s7 +; GCN-NEXT: v_dual_mov_b32 v4, s8 :: v_dual_mov_b32 v5, s9 +; GCN-NEXT: v_dual_mov_b32 v6, s10 :: v_dual_mov_b32 v7, s11 ; GCN-NEXT: ; return to shader part epilog entry: %idx = load i32, ptr addrspace(4) %p, align 4, !range !0 @@ -350,16 +350,16 @@ define amdgpu_ps <16 x float> @s_load_b512_idxprom_range(ptr addrspace(4) align ; GCN: ; %bb.0: ; %entry ; GCN-NEXT: s_load_b32 s2, s[0:1], 0x0 ; GCN-NEXT: s_wait_kmcnt 0x0 -; GCN-NEXT: s_load_b512 s[0:15], s[0:1], s2 offset:0x0 scale_offset +; GCN-NEXT: s_load_b512 s[4:19], s[0:1], s2 
offset:0x0 scale_offset ; GCN-NEXT: s_wait_kmcnt 0x0 -; GCN-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1 -; GCN-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3 -; GCN-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5 -; GCN-NEXT: v_dual_mov_b32 v6, s6 :: v_dual_mov_b32 v7, s7 -; GCN-NEXT: v_dual_mov_b32 v8, s8 :: v_dual_mov_b32 v9, s9 -; GCN-NEXT: v_dual_mov_b32 v10, s10 :: v_dual_mov_b32 v11, s11 -; GCN-NEXT: v_dual_mov_b32 v12, s12 :: v_dual_mov_b32 v13, s13 -; GCN-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s15 +; GCN-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5 +; GCN-NEXT: v_dual_mov_b32 v2, s6 :: v_dual_mov_b32 v3, s7 +; GCN-NEXT: v_dual_mov_b32 v4, s8 :: v_dual_mov_b32 v5, s9 +; GCN-NEXT: v_dual_mov_b32 v6, s10 :: v_dual_mov_b32 v7, s11 +; GCN-NEXT: v_dual_mov_b32 v8, s12 :: v_dual_mov_b32 v9, s13 +; GCN-NEXT: v_dual_mov_b32 v10, s14 :: v_dual_mov_b32 v11, s15 +; GCN-NEXT: v_dual_mov_b32 v12, s16 :: v_dual_mov_b32 v13, s17 +; GCN-NEXT: v_dual_mov_b32 v14, s18 :: v_dual_mov_b32 v15, s19 ; GCN-NEXT: ; return to shader part epilog entry: %idx = load i32, ptr addrspace(4) %p, align 4, !range !0 diff --git a/llvm/test/CodeGen/AMDGPU/v_ashr_pk.ll b/llvm/test/CodeGen/AMDGPU/v_ashr_pk.ll index f2ecfe8..3d74b17 100644 --- a/llvm/test/CodeGen/AMDGPU/v_ashr_pk.ll +++ b/llvm/test/CodeGen/AMDGPU/v_ashr_pk.ll @@ -17,16 +17,16 @@ define amdgpu_kernel void @v_ashr_pk_i8_i32(ptr addrspace(1) %out, i32 %src0, i3 ; ; GFX1250-LABEL: v_ashr_pk_i8_i32: ; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_clause 0x1 ; GFX1250-NEXT: s_load_b96 s[0:2], s[4:5], 0x2c -; GFX1250-NEXT: s_wait_xcnt 0x0 -; GFX1250-NEXT: s_load_b64 s[4:5], s[4:5], 0x24 +; GFX1250-NEXT: s_load_b64 s[6:7], s[4:5], 0x24 ; GFX1250-NEXT: v_mov_b32_e32 v1, 0 ; GFX1250-NEXT: s_wait_kmcnt 0x0 ; GFX1250-NEXT: s_and_b32 s2, s2, 31 ; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-NEXT: v_mov_b32_e32 v0, s2 ; GFX1250-NEXT: v_ashr_pk_i8_i32 v0, s0, s1, v0 -; GFX1250-NEXT: global_store_b16 v1, v0, s[4:5] +; GFX1250-NEXT: global_store_b16 v1, v0, s[6:7] ; GFX1250-NEXT: s_endpgm %insert.0 = insertelement <2 x i32> poison, i32 %src0, i64 0 %build_vector = insertelement <2 x i32> %insert.0, i32 %src1, i64 1 @@ -58,16 +58,16 @@ define amdgpu_kernel void @v_ashr_pk_u8_i32(ptr addrspace(1) %out, i32 %src0, i3 ; ; GFX1250-LABEL: v_ashr_pk_u8_i32: ; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_clause 0x1 ; GFX1250-NEXT: s_load_b96 s[0:2], s[4:5], 0x2c -; GFX1250-NEXT: s_wait_xcnt 0x0 -; GFX1250-NEXT: s_load_b64 s[4:5], s[4:5], 0x24 +; GFX1250-NEXT: s_load_b64 s[6:7], s[4:5], 0x24 ; GFX1250-NEXT: v_mov_b32_e32 v1, 0 ; GFX1250-NEXT: s_wait_kmcnt 0x0 ; GFX1250-NEXT: s_and_b32 s2, s2, 31 ; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-NEXT: v_mov_b32_e32 v0, s2 ; GFX1250-NEXT: v_ashr_pk_u8_i32 v0, s0, s1, v0 -; GFX1250-NEXT: global_store_b16 v1, v0, s[4:5] +; GFX1250-NEXT: global_store_b16 v1, v0, s[6:7] ; GFX1250-NEXT: s_endpgm %insert.0 = insertelement <2 x i32> poison, i32 %src0, i64 0 %build_vector = insertelement <2 x i32> %insert.0, i32 %src1, i64 1 diff --git a/llvm/test/CodeGen/AMDGPU/wait-xcnt.mir b/llvm/test/CodeGen/AMDGPU/wait-xcnt.mir index af8b9e7..6fe99d8 100644 --- a/llvm/test/CodeGen/AMDGPU/wait-xcnt.mir +++ b/llvm/test/CodeGen/AMDGPU/wait-xcnt.mir @@ -520,6 +520,7 @@ body: | ; GCN-NEXT: GLOBAL_STORE_DWORDX2 $vgpr0_vgpr1, $vgpr4_vgpr5, 16, 0, implicit $exec ; GCN-NEXT: S_WAIT_KMCNT 0 ; GCN-NEXT: $sgpr2 = S_ADD_I32 $sgpr0, 100, 
implicit-def $scc + ; GCN-NEXT: S_WAIT_XCNT 0 ; GCN-NEXT: $vgpr0 = V_MOV_B32_e32 20, implicit $exec $sgpr2_sgpr3 = S_LOAD_DWORDX2_IMM $sgpr0_sgpr1, 0, 0 :: (load (s64), addrspace 4) $vgpr0 = V_MOV_B32_e32 1, implicit $exec @@ -921,7 +922,6 @@ body: | $vgpr2 = V_MOV_B32_e32 1, implicit $exec ... -# FIXME: Missing S_WAIT_XCNT before overwriting vgpr0. --- name: wait_kmcnt_with_outstanding_vmem tracksRegLiveness: true @@ -937,6 +937,7 @@ body: | ; GCN-NEXT: $vgpr2 = GLOBAL_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec ; GCN-NEXT: S_WAIT_KMCNT 0 ; GCN-NEXT: $sgpr2 = S_MOV_B32 $sgpr2 + ; GCN-NEXT: S_WAIT_XCNT 0 ; GCN-NEXT: $vgpr0 = V_MOV_B32_e32 0, implicit $exec $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 0, 0 $vgpr2 = GLOBAL_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec @@ -944,7 +945,6 @@ body: | $vgpr0 = V_MOV_B32_e32 0, implicit $exec ... -# FIXME: Missing S_WAIT_XCNT before overwriting sgpr0. --- name: wait_loadcnt_with_outstanding_smem tracksRegLiveness: true @@ -960,6 +960,7 @@ body: | ; GCN-NEXT: $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 0, 0 ; GCN-NEXT: S_WAIT_LOADCNT 0 ; GCN-NEXT: $vgpr2 = V_MOV_B32_e32 $vgpr2, implicit $exec + ; GCN-NEXT: S_WAIT_XCNT 0 ; GCN-NEXT: $sgpr0 = S_MOV_B32 0 $vgpr2 = GLOBAL_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 0, 0 @@ -967,7 +968,6 @@ body: | $sgpr0 = S_MOV_B32 0 ... -# TODO: Unnecessary wait before overwriting vgpr0. --- name: overwrite_vgpr_after_smem tracksRegLiveness: true @@ -981,14 +981,12 @@ body: | ; GCN-NEXT: {{ $}} ; GCN-NEXT: $vgpr2 = GLOBAL_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec ; GCN-NEXT: $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 0, 0 - ; GCN-NEXT: S_WAIT_XCNT 0 ; GCN-NEXT: $vgpr0 = V_MOV_B32_e32 0, implicit $exec $vgpr2 = GLOBAL_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 0, 0 $vgpr0 = V_MOV_B32_e32 0, implicit $exec ... -# TODO: Unnecessary wait before overwriting sgpr0. 
--- name: overwrite_sgpr_after_vmem tracksRegLiveness: true @@ -1002,7 +1000,6 @@ body: | ; GCN-NEXT: {{ $}} ; GCN-NEXT: $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 0, 0 ; GCN-NEXT: $vgpr2 = GLOBAL_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec - ; GCN-NEXT: S_WAIT_XCNT 0 ; GCN-NEXT: $sgpr0 = S_MOV_B32 0 $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 0, 0 $vgpr2 = GLOBAL_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec diff --git a/llvm/test/CodeGen/AMDGPU/workgroup-id-in-arch-sgprs.ll b/llvm/test/CodeGen/AMDGPU/workgroup-id-in-arch-sgprs.ll index a392692..6636eb5 100644 --- a/llvm/test/CodeGen/AMDGPU/workgroup-id-in-arch-sgprs.ll +++ b/llvm/test/CodeGen/AMDGPU/workgroup-id-in-arch-sgprs.ll @@ -211,38 +211,39 @@ define amdgpu_kernel void @workgroup_id_xyz(ptr addrspace(1) %ptrx, ptr addrspac ; GFX1250-SDAG-LABEL: workgroup_id_xyz: ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: s_bfe_u32 s0, ttmp6, 0x40014 -; GFX1250-SDAG-NEXT: s_lshr_b32 s6, ttmp7, 16 -; GFX1250-SDAG-NEXT: s_add_co_i32 s7, s0, 1 +; GFX1250-SDAG-NEXT: s_lshr_b32 s8, ttmp7, 16 +; GFX1250-SDAG-NEXT: s_add_co_i32 s9, s0, 1 +; GFX1250-SDAG-NEXT: s_clause 0x1 ; GFX1250-SDAG-NEXT: s_load_b128 s[0:3], s[4:5], 0x0 +; GFX1250-SDAG-NEXT: s_load_b64 s[6:7], s[4:5], 0x10 ; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0 -; GFX1250-SDAG-NEXT: s_load_b64 s[4:5], s[4:5], 0x10 +; GFX1250-SDAG-NEXT: s_mul_i32 s4, s8, s9 ; GFX1250-SDAG-NEXT: s_bfe_u32 s9, ttmp6, 0x40010 -; GFX1250-SDAG-NEXT: s_mul_i32 s7, s6, s7 -; GFX1250-SDAG-NEXT: s_bfe_u32 s8, ttmp6, 0x40008 +; GFX1250-SDAG-NEXT: s_bfe_u32 s5, ttmp6, 0x40008 ; GFX1250-SDAG-NEXT: s_and_b32 s10, ttmp7, 0xffff ; GFX1250-SDAG-NEXT: s_add_co_i32 s9, s9, 1 ; GFX1250-SDAG-NEXT: s_bfe_u32 s11, ttmp6, 0x4000c -; GFX1250-SDAG-NEXT: s_add_co_i32 s8, s8, s7 -; GFX1250-SDAG-NEXT: s_mul_i32 s7, s10, s9 +; GFX1250-SDAG-NEXT: s_add_co_i32 s5, s5, s4 +; GFX1250-SDAG-NEXT: s_mul_i32 s4, s10, s9 ; GFX1250-SDAG-NEXT: s_bfe_u32 s9, ttmp6, 0x40004 ; GFX1250-SDAG-NEXT: s_add_co_i32 s11, s11, 1 -; GFX1250-SDAG-NEXT: s_add_co_i32 s9, s9, s7 -; GFX1250-SDAG-NEXT: s_and_b32 s7, ttmp6, 15 +; GFX1250-SDAG-NEXT: s_add_co_i32 s9, s9, s4 +; GFX1250-SDAG-NEXT: s_and_b32 s4, ttmp6, 15 ; GFX1250-SDAG-NEXT: s_mul_i32 s11, ttmp9, s11 ; GFX1250-SDAG-NEXT: s_getreg_b32 s12, hwreg(HW_REG_IB_STS2, 6, 4) -; GFX1250-SDAG-NEXT: s_add_co_i32 s7, s7, s11 +; GFX1250-SDAG-NEXT: s_add_co_i32 s4, s4, s11 ; GFX1250-SDAG-NEXT: s_cmp_eq_u32 s12, 0 -; GFX1250-SDAG-NEXT: s_cselect_b32 s7, ttmp9, s7 +; GFX1250-SDAG-NEXT: s_cselect_b32 s4, ttmp9, s4 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1) -; GFX1250-SDAG-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s7 -; GFX1250-SDAG-NEXT: s_cselect_b32 s7, s10, s9 -; GFX1250-SDAG-NEXT: s_cselect_b32 s6, s6, s8 -; GFX1250-SDAG-NEXT: v_dual_mov_b32 v2, s7 :: v_dual_mov_b32 v3, s6 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s4 +; GFX1250-SDAG-NEXT: s_cselect_b32 s4, s10, s9 +; GFX1250-SDAG-NEXT: s_cselect_b32 s5, s8, s5 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v2, s4 :: v_dual_mov_b32 v3, s5 ; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0 ; GFX1250-SDAG-NEXT: s_clause 0x2 ; GFX1250-SDAG-NEXT: global_store_b32 v0, v1, s[0:1] ; GFX1250-SDAG-NEXT: global_store_b32 v0, v2, s[2:3] -; GFX1250-SDAG-NEXT: global_store_b32 v0, v3, s[4:5] +; GFX1250-SDAG-NEXT: global_store_b32 v0, v3, s[6:7] ; GFX1250-SDAG-NEXT: s_endpgm ; ; GFX1250-GISEL-LABEL: workgroup_id_xyz: @@ -250,39 +251,40 @@ define amdgpu_kernel void @workgroup_id_xyz(ptr addrspace(1) %ptrx, ptr addrspac ; 
GFX1250-GISEL-NEXT: s_bfe_u32 s0, ttmp6, 0x4000c ; GFX1250-GISEL-NEXT: s_and_b32 s1, ttmp6, 15 ; GFX1250-GISEL-NEXT: s_add_co_i32 s0, s0, 1 -; GFX1250-GISEL-NEXT: s_getreg_b32 s6, hwreg(HW_REG_IB_STS2, 6, 4) +; GFX1250-GISEL-NEXT: s_getreg_b32 s8, hwreg(HW_REG_IB_STS2, 6, 4) ; GFX1250-GISEL-NEXT: s_mul_i32 s0, ttmp9, s0 ; GFX1250-GISEL-NEXT: v_mov_b32_e32 v1, 0 ; GFX1250-GISEL-NEXT: s_add_co_i32 s1, s1, s0 -; GFX1250-GISEL-NEXT: s_cmp_eq_u32 s6, 0 -; GFX1250-GISEL-NEXT: s_cselect_b32 s7, ttmp9, s1 +; GFX1250-GISEL-NEXT: s_cmp_eq_u32 s8, 0 +; GFX1250-GISEL-NEXT: s_cselect_b32 s9, ttmp9, s1 ; GFX1250-GISEL-NEXT: s_bfe_u32 s0, ttmp6, 0x40010 -; GFX1250-GISEL-NEXT: s_and_b32 s8, ttmp7, 0xffff +; GFX1250-GISEL-NEXT: s_and_b32 s10, ttmp7, 0xffff ; GFX1250-GISEL-NEXT: s_add_co_i32 s0, s0, 1 -; GFX1250-GISEL-NEXT: s_bfe_u32 s9, ttmp6, 0x40004 -; GFX1250-GISEL-NEXT: s_mul_i32 s10, s8, s0 +; GFX1250-GISEL-NEXT: s_bfe_u32 s11, ttmp6, 0x40004 +; GFX1250-GISEL-NEXT: s_mul_i32 s12, s10, s0 +; GFX1250-GISEL-NEXT: s_clause 0x1 ; GFX1250-GISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x0 +; GFX1250-GISEL-NEXT: s_load_b64 s[6:7], s[4:5], 0x10 +; GFX1250-GISEL-NEXT: s_add_co_i32 s11, s11, s12 +; GFX1250-GISEL-NEXT: s_cmp_eq_u32 s8, 0 +; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, s9 ; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0 -; GFX1250-GISEL-NEXT: s_load_b64 s[4:5], s[4:5], 0x10 -; GFX1250-GISEL-NEXT: s_add_co_i32 s9, s9, s10 -; GFX1250-GISEL-NEXT: s_cmp_eq_u32 s6, 0 -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, s7 -; GFX1250-GISEL-NEXT: s_cselect_b32 s8, s8, s9 -; GFX1250-GISEL-NEXT: s_bfe_u32 s9, ttmp6, 0x40014 +; GFX1250-GISEL-NEXT: s_cselect_b32 s4, s10, s11 +; GFX1250-GISEL-NEXT: s_bfe_u32 s5, ttmp6, 0x40014 ; GFX1250-GISEL-NEXT: s_lshr_b32 s10, ttmp7, 16 -; GFX1250-GISEL-NEXT: s_add_co_i32 s9, s9, 1 +; GFX1250-GISEL-NEXT: s_add_co_i32 s5, s5, 1 ; GFX1250-GISEL-NEXT: s_bfe_u32 s11, ttmp6, 0x40008 -; GFX1250-GISEL-NEXT: s_mul_i32 s9, s10, s9 +; GFX1250-GISEL-NEXT: s_mul_i32 s5, s10, s5 ; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1) -; GFX1250-GISEL-NEXT: s_add_co_i32 s11, s11, s9 -; GFX1250-GISEL-NEXT: s_cmp_eq_u32 s6, 0 -; GFX1250-GISEL-NEXT: s_cselect_b32 s6, s10, s11 -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v2, s8 :: v_dual_mov_b32 v3, s6 +; GFX1250-GISEL-NEXT: s_add_co_i32 s11, s11, s5 +; GFX1250-GISEL-NEXT: s_cmp_eq_u32 s8, 0 +; GFX1250-GISEL-NEXT: s_cselect_b32 s5, s10, s11 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v2, s4 :: v_dual_mov_b32 v3, s5 ; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0 ; GFX1250-GISEL-NEXT: s_clause 0x2 ; GFX1250-GISEL-NEXT: global_store_b32 v1, v0, s[0:1] ; GFX1250-GISEL-NEXT: global_store_b32 v1, v2, s[2:3] -; GFX1250-GISEL-NEXT: global_store_b32 v1, v3, s[4:5] +; GFX1250-GISEL-NEXT: global_store_b32 v1, v3, s[6:7] ; GFX1250-GISEL-NEXT: s_endpgm ; GFX12-LABEL: workgroup_id_xyz: ; GFX12: ; %bb.0: diff --git a/llvm/test/CodeGen/Hexagon/inst_setcc_uno_uo.ll b/llvm/test/CodeGen/Hexagon/inst_setcc_uno_uo.ll new file mode 100644 index 0000000..8b121c5 --- /dev/null +++ b/llvm/test/CodeGen/Hexagon/inst_setcc_uno_uo.ll @@ -0,0 +1,93 @@ +;; RUN: llc --mtriple=hexagon -mattr=+hvxv79,+hvx-length128b %s -o - | FileCheck %s + +define dso_local void @store_isnan_f32(ptr %a, ptr %b, ptr %isnan_cmp) local_unnamed_addr { +entry: + %arrayidx_a = getelementptr inbounds nuw float, ptr %a, i32 0 + %arrayidx_b = getelementptr inbounds nuw float, ptr %b, i32 0 + %0 = load <32 x float>, ptr %arrayidx_a, align 4 + %1 = load <32 x float>, ptr %arrayidx_b, align 4 + %.vectorized = fcmp 
uno <32 x float> %0, %1 + %.LS.instance = zext <32 x i1> %.vectorized to <32 x i32> + %arrayidx1 = getelementptr inbounds nuw i32, ptr %isnan_cmp, i32 0 + store <32 x i32> %.LS.instance, ptr %arrayidx1, align 4 + ret void +} + +; CHECK: store_isnan_f32 +; CHECK: [[RONE32:r[0-9]+]] = #1 +; CHECK: [[VOP2_F32:v[0-9]+]] = vxor([[VOP2_F32]],[[VOP2_F32]]) +; CHECK: [[VOP1_F32:v[0-9]+]] = vmemu(r0+#0) +; CHECK: [[VONES32:v[0-9]+]] = vsplat([[RONE32]]) +; CHECK: [[Q1_F32:q[0-9]+]] = vcmp.eq([[VOP1_F32]].w,[[VOP1_F32]].w) +; CHECK: [[VOP3_F32:v[0-9]+]] = vmemu(r1+#0) +; CHECK: [[Q1_F32]] &= vcmp.eq([[VOP3_F32]].w,[[VOP3_F32]].w) +; CHECK: [[VOUT_F32:v[0-9]+]] = vmux([[Q1_F32]],[[VOP2_F32]],[[VONES32]]) +; CHECK: vmemu(r2+#0) = [[VOUT_F32]] + +define dso_local void @store_isnan_f16(ptr %a, ptr %b, ptr %isnan_cmp) local_unnamed_addr { +entry: + %arrayidx_a = getelementptr inbounds nuw half, ptr %a, i32 0 + %arrayidx_b = getelementptr inbounds nuw half, ptr %b, i32 0 + %0 = load <64 x half>, ptr %arrayidx_a, align 2 + %1 = load <64 x half>, ptr %arrayidx_b, align 2 + %.vectorized = fcmp uno <64 x half> %0, %1 + %conv.LS.instance = zext <64 x i1> %.vectorized to <64 x i16> + %arrayidx1 = getelementptr inbounds nuw i16, ptr %isnan_cmp, i32 0 + store <64 x i16> %conv.LS.instance, ptr %arrayidx1, align 2 + ret void +} +; CHECK-LABEL: store_isnan_f16 +; CHECK: [[RONE16:r[0-9]+]] = #1 +; CHECK: [[VOP2_F16:v[0-9]+]] = vxor([[VOP2_F16]],[[VOP2_F16]]) +; CHECK: [[VOP1_F16:v[0-9]+]] = vmemu(r0+#0) +; CHECK: [[VONES16:v[0-9]+]].h = vsplat([[RONE16]]) +; CHECK: [[Q1_F16:q[0-9]+]] = vcmp.eq([[VOP1_F16]].h,[[VOP1_F16]].h) +; CHECK: [[VOP3_F16:v[0-9]+]] = vmemu(r1+#0) +; CHECK: [[Q1_F16]] &= vcmp.eq([[VOP3_F16]].h,[[VOP3_F16]].h) +; CHECK: [[VOUT_F16:v[0-9]+]] = vmux([[Q1_F16]],[[VOP2_F16]],[[VONES16]]) +; CHECK: vmemu(r2+#0) = [[VOUT_F32]] + +define dso_local void @store_isordered_f32(ptr %a, ptr %b, ptr %isordered_cmp) local_unnamed_addr { +entry: + %arrayidx_a = getelementptr inbounds nuw float, ptr %a, i32 0 + %arrayidx_b = getelementptr inbounds nuw float, ptr %b, i32 0 + %0 = load <32 x float>, ptr %arrayidx_a, align 4 + %1 = load <32 x float>, ptr %arrayidx_b, align 4 + %.vectorized = fcmp ord <32 x float> %0, %1 + %.LS.instance = zext <32 x i1> %.vectorized to <32 x i32> + %arrayidx1 = getelementptr inbounds nuw i32, ptr %isordered_cmp, i32 0 + store <32 x i32> %.LS.instance, ptr %arrayidx1, align 4 + ret void +} +; CHECK-LABEL: store_isordered_f32 +; CHECK: [[VOP2_ORD_F32:v[0-9]+]] = vxor([[VOP2_ORD_F32]],[[VOP2_ORD_F32]]) +; CHECK: [[VOP1_ORD_F32:v[0-9]+]] = vmemu(r0+#0) +; CHECK: [[VONES_ORD_F32:v[0-9]+]] = vsplat([[RONE32]]) +; CHECK: [[Q1_ORD_F32:q[0-9]+]] = vcmp.eq([[VOP1_ORD_F32]].w,[[VOP1_ORD_F32]].w) +; CHECK: [[VOP3_ORD_F32:v[0-9]+]] = vmemu(r1+#0) +; CHECK: [[Q1_ORD_F32]] &= vcmp.eq([[VOP3_ORD_F32]].w,[[VOP3_ORD_F32]].w) +; CHECK: [[VOUT_ORD_F32:v[0-9]+]] = vmux([[Q1_ORD_F32]],[[VONES_ORD_F32]],[[VOP2_ORD_F32]]) +; CHECK: vmemu(r2+#0) = [[VOUT_ORD_F32]] + + +define dso_local void @store_isordered_f16(ptr %a, ptr %b, ptr %isordered_cmp) local_unnamed_addr { +entry: + %arrayidx_a = getelementptr inbounds nuw half, ptr %a, i32 0 + %arrayidx_b = getelementptr inbounds nuw half, ptr %b, i32 0 + %0 = load <64 x half>, ptr %arrayidx_a, align 2 + %1 = load <64 x half>, ptr %arrayidx_b, align 2 + %.vectorized = fcmp ord <64 x half> %0, %1 + %conv.LS.instance = zext <64 x i1> %.vectorized to <64 x i16> + %arrayidx1 = getelementptr inbounds nuw i16, ptr %isordered_cmp, i32 0 + store <64 x i16> 
%conv.LS.instance, ptr %arrayidx1, align 2 + ret void +} +; CHECK-LABEL: store_isordered_f16 +; CHECK: [[VOP2_ORD_F16:v[0-9]+]] = vxor([[VOP2_ORD_F16]],[[VOP2_ORD_F16]]) +; CHECK: [[VOP1_ORD_F16:v[0-9]+]] = vmemu(r0+#0) +; CHECK: [[VONES_ORD_F16:v[0-9]+]].h = vsplat([[RONE16]]) +; CHECK: [[Q1_ORD_F16:q[0-9]+]] = vcmp.eq([[VOP1_ORD_F16]].h,[[VOP1_ORD_F16]].h) +; CHECK: [[VOP3_ORD_F16:v[0-9]+]] = vmemu(r1+#0) +; CHECK: [[Q1_ORD_F16]] &= vcmp.eq([[VOP3_ORD_F16]].h,[[VOP3_ORD_F16]].h) +; CHECK: [[VOUT_ORD_F16:v[0-9]+]] = vmux([[Q1_ORD_F16]],[[VONES_ORD_F16]],[[VOP2_ORD_F16]]) +; CHECK: vmemu(r2+#0) = [[VOUT_ORD_F16]] diff --git a/llvm/test/CodeGen/Hexagon/isel-fold-shl-zext.ll b/llvm/test/CodeGen/Hexagon/isel-fold-shl-zext.ll index 5fa5023..fe0f7dd 100644 --- a/llvm/test/CodeGen/Hexagon/isel-fold-shl-zext.ll +++ b/llvm/test/CodeGen/Hexagon/isel-fold-shl-zext.ll @@ -1,3 +1,4 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6 ; RUN: llc -mtriple=hexagon-unknown-elf < %s | FileCheck %s ; In ISelLowering, when folding nodes (or (shl xx, s), (zext y)) @@ -11,17 +12,18 @@ target triple = "hexagon" ; Function Attrs: nofree nosync nounwind memory(readwrite, inaccessiblemem: none) define dso_local void @foo(i64* nocapture noundef %buf, i32 %a, i32 %b) local_unnamed_addr { ; CHECK-LABEL: foo: -; CHECK: // %bb.0: // %entry +; CHECK: .cfi_startproc +; CHECK-NEXT: // %bb.0: // %entry ; CHECK-NEXT: { -; CHECK-NEXT: r[[REG0:[0-9]+]] = addasl(r2,r1,#1) -; CHECK-NEXT: r[[REG2:[0-9]+]] = asl(r1,#1) +; CHECK-NEXT: r2 = addasl(r2,r1,#1) +; CHECK-NEXT: r3 = asl(r1,#1) ; CHECK-NEXT: } ; CHECK-NEXT: { -; CHECK-NEXT: r[[REG1:[0-9]+]] = addasl(r[[REG0]],r1,#1) +; CHECK-NEXT: r2 = addasl(r2,r1,#1) ; CHECK-NEXT: } ; CHECK-NEXT: { ; CHECK-NEXT: jumpr r31 -; CHECK-NEXT: memd(r0+#8) = r[[REG2]]:[[REG1]] +; CHECK-NEXT: memd(r0+#8) = r3:2 ; CHECK-NEXT: } entry: %arrayidx = getelementptr inbounds i64, i64* %buf, i32 1 diff --git a/llvm/test/CodeGen/Hexagon/isel-inttofp-v32i1tov32f32.ll b/llvm/test/CodeGen/Hexagon/isel-inttofp-v32i1tov32f32.ll new file mode 100644 index 0000000..93ca3a2 --- /dev/null +++ b/llvm/test/CodeGen/Hexagon/isel-inttofp-v32i1tov32f32.ll @@ -0,0 +1,42 @@ +; Tests lowering of v32i1 to v32f32 + +; RUN: llc -march=hexagon -mattr=+hvxv79,+hvx-length128b,+hvx-ieee-fp \ +; RUN: -stop-after=hexagon-isel %s -o - | FileCheck %s + +define <32 x float> @uitofp_i1(<32 x i16> %in0, <32 x i16> %in1) #0 { +; CHECK: name: uitofp_i1 +; CHECK: [[R0:%[0-9]+]]:hvxvr = V6_lvsplatw killed %{{[0-9]+}} +; CHECK-NEXT: [[R1:%[0-9]+]]:intregs = A2_tfrsi 1 +; CHECK-NEXT: [[R2:%[0-9]+]]:hvxvr = V6_lvsplatw [[R1]] +; CHECK-NEXT: [[R3:%[0-9]+]]:hvxqr = V6_vandvrt [[R2]], [[R1]] +; CHECK-NEXT: [[R4:%[0-9]+]]:hvxvr = V6_vprefixqw killed [[R3]] +; CHECK-NEXT: [[R5:%[0-9]+]]:hvxvr = V6_vsubw killed [[R4]], [[R2]] +; CHECK-NEXT: [[R6:%[0-9]+]]:hvxvr = V6_vlsrwv killed [[R0]], killed [[R5]] +; CHECK-NEXT: [[R7:%[0-9]+]]:hvxvr = V6_vand killed [[R6]], [[R2]] +; CHECK-NEXT: [[R8:%[0-9]+]]:hvxvr = V6_vconv_sf_w killed [[R7]] +; CHECK-NEXT: hvxvr = V6_vadd_sf_sf [[R8]], [[R8]] + %q1 = icmp eq <32 x i16> %in0, %in1 + %fp0 = uitofp <32 x i1> %q1 to <32 x float> + %out = fadd <32 x float> %fp0, %fp0 + ret <32 x float> %out +} + +define <32 x float> @sitofp_i1(<32 x i16> %in0, <32 x i16> %in1) #0 { +; CHECK: name: sitofp_i1 +; CHECK: [[R0:%[0-9]+]]:hvxvr = V6_lvsplatw killed %{{[0-9]+}} +; CHECK-NEXT: [[R1:%[0-9]+]]:intregs = A2_tfrsi 1 +; CHECK-NEXT: [[R2:%[0-9]+]]:hvxvr = V6_lvsplatw [[R1]] +; 
CHECK-NEXT: [[R3:%[0-9]+]]:hvxqr = V6_vandvrt [[R2]], [[R1]] +; CHECK-NEXT: [[R4:%[0-9]+]]:hvxvr = V6_vprefixqw killed [[R3]] +; CHECK-NEXT: [[R5:%[0-9]+]]:hvxvr = V6_vsubw killed [[R4]], [[R2]] +; CHECK-NEXT: [[R6:%[0-9]+]]:hvxvr = V6_vlsrwv killed [[R0]], killed [[R5]] +; CHECK-NEXT: [[R7:%[0-9]+]]:hvxvr = V6_vand killed [[R6]], [[R2]] +; CHECK-NEXT: [[R8:%[0-9]+]]:hvxvr = V6_vconv_sf_w killed [[R7]] +; CHECK-NEXT: hvxvr = V6_vadd_sf_sf [[R8]], [[R8]] + %q1 = icmp eq <32 x i16> %in0, %in1 + %fp0 = sitofp <32 x i1> %q1 to <32 x float> + %out = fadd <32 x float> %fp0, %fp0 + ret <32 x float> %out +} + +attributes #0 = { nounwind readnone "target-cpu"="hexagonv79" "target-features"="+hvxv79,+hvx-length128b" } diff --git a/llvm/test/CodeGen/Hexagon/isel-uinttofp-v32i1tov32f32.ll b/llvm/test/CodeGen/Hexagon/isel-uinttofp-v32i1tov32f32.ll deleted file mode 100644 index dfb2bc83..0000000 --- a/llvm/test/CodeGen/Hexagon/isel-uinttofp-v32i1tov32f32.ll +++ /dev/null @@ -1,25 +0,0 @@ -; Tests lowering of v32i1 to v32f32 - -; RUN: llc -march=hexagon -mattr=+hvxv79,+hvx-length128b,+hvx-ieee-fp \ -; RUN: -stop-after=hexagon-isel %s -o - | FileCheck %s - -; CHECK: [[R0:%[0-9]+]]:hvxvr = V6_lvsplatw killed %{{[0-9]+}} -; CHECK-NEXT: [[R1:%[0-9]+]]:intregs = A2_tfrsi 1 -; CHECK-NEXT: [[R2:%[0-9]+]]:hvxvr = V6_lvsplatw [[R1]] -; CHECK-NEXT: [[R3:%[0-9]+]]:hvxqr = V6_vandvrt [[R2]], [[R1]] -; CHECK-NEXT: [[R4:%[0-9]+]]:hvxvr = V6_vprefixqw killed [[R3]] -; CHECK-NEXT: [[R5:%[0-9]+]]:hvxvr = V6_vsubw killed [[R4]], [[R2]] -; CHECK-NEXT: [[R6:%[0-9]+]]:hvxvr = V6_vlsrwv killed [[R0]], killed [[R5]] -; CHECK-NEXT: [[R7:%[0-9]+]]:hvxvr = V6_vand killed [[R6]], [[R2]] -; CHECK-NEXT: [[R8:%[0-9]+]]:hvxvr = V6_vconv_sf_w killed [[R7]] -; CHECK-NEXT: hvxvr = V6_vadd_sf_sf [[R8]], [[R8]] - -define <32 x float> @uitofp_i1(<32 x i16> %in0, <32 x i16> %in1) #0 -{ - %q1 = icmp eq <32 x i16> %in0, %in1 - %fp0 = uitofp <32 x i1> %q1 to <32 x float> - %out = fadd <32 x float> %fp0, %fp0 - ret <32 x float> %out -} - -attributes #0 = { nounwind readnone "target-cpu"="hexagonv79" "target-features"="+hvxv79,+hvx-length128b" } diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/double-intrinsics.ll b/llvm/test/CodeGen/RISCV/GlobalISel/double-intrinsics.ll index 1469d49..4b0acda 100644 --- a/llvm/test/CodeGen/RISCV/GlobalISel/double-intrinsics.ll +++ b/llvm/test/CodeGen/RISCV/GlobalISel/double-intrinsics.ll @@ -1420,3 +1420,61 @@ define double @tanh_f64(double %a) nounwind { %1 = call double @llvm.tanh.f64(double %a) ret double %1 } + +define { double, double } @test_modf_f64(double %a) nounwind { +; RV32IFD-LABEL: test_modf_f64: +; RV32IFD: # %bb.0: +; RV32IFD-NEXT: addi sp, sp, -16 +; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IFD-NEXT: mv a0, sp +; RV32IFD-NEXT: call modf +; RV32IFD-NEXT: fld fa1, 0(sp) +; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IFD-NEXT: addi sp, sp, 16 +; RV32IFD-NEXT: ret +; +; RV64IFD-LABEL: test_modf_f64: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: addi sp, sp, -16 +; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IFD-NEXT: mv a0, sp +; RV64IFD-NEXT: call modf +; RV64IFD-NEXT: fld fa1, 0(sp) +; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IFD-NEXT: addi sp, sp, 16 +; RV64IFD-NEXT: ret +; +; RV32I-LABEL: test_modf_f64: +; RV32I: # %bb.0: +; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32I-NEXT: sw s0, 8(sp) # 4-byte Folded Spill +; RV32I-NEXT: mv s0, a0 +; RV32I-NEXT: mv a0, a1 +; RV32I-NEXT: mv a1, a2 +; RV32I-NEXT: mv 
a2, sp +; RV32I-NEXT: call modf +; RV32I-NEXT: lw a2, 0(sp) +; RV32I-NEXT: lw a3, 4(sp) +; RV32I-NEXT: sw a0, 0(s0) +; RV32I-NEXT: sw a1, 4(s0) +; RV32I-NEXT: sw a2, 8(s0) +; RV32I-NEXT: sw a3, 12(s0) +; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload +; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: ret +; +; RV64I-LABEL: test_modf_f64: +; RV64I: # %bb.0: +; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64I-NEXT: mv a1, sp +; RV64I-NEXT: call modf +; RV64I-NEXT: ld a1, 0(sp) +; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: ret + %result = call { double, double } @llvm.modf.f64(double %a) + ret { double, double } %result +} diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/float-intrinsics.ll b/llvm/test/CodeGen/RISCV/GlobalISel/float-intrinsics.ll index 23f660b..01d9ceb 100644 --- a/llvm/test/CodeGen/RISCV/GlobalISel/float-intrinsics.ll +++ b/llvm/test/CodeGen/RISCV/GlobalISel/float-intrinsics.ll @@ -2118,3 +2118,62 @@ define float @tanh_f32(float %a) nounwind { %1 = call float @llvm.tanh.f32(float %a) ret float %1 } + +define { float, float } @test_modf_f32(float %a) nounwind { +; RV32IF-LABEL: test_modf_f32: +; RV32IF: # %bb.0: +; RV32IF-NEXT: addi sp, sp, -16 +; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IF-NEXT: addi a0, sp, 8 +; RV32IF-NEXT: call modff +; RV32IF-NEXT: flw fa1, 8(sp) +; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IF-NEXT: addi sp, sp, 16 +; RV32IF-NEXT: ret +; +; RV64IF-LABEL: test_modf_f32: +; RV64IF: # %bb.0: +; RV64IF-NEXT: addi sp, sp, -16 +; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IF-NEXT: addi a0, sp, 4 +; RV64IF-NEXT: call modff +; RV64IF-NEXT: flw fa1, 4(sp) +; RV64IF-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IF-NEXT: addi sp, sp, 16 +; RV64IF-NEXT: ret +; +; RV64IFD-LABEL: test_modf_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: addi sp, sp, -16 +; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IFD-NEXT: addi a0, sp, 4 +; RV64IFD-NEXT: call modff +; RV64IFD-NEXT: flw fa1, 4(sp) +; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IFD-NEXT: addi sp, sp, 16 +; RV64IFD-NEXT: ret +; +; RV32I-LABEL: test_modf_f32: +; RV32I: # %bb.0: +; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32I-NEXT: addi a1, sp, 8 +; RV32I-NEXT: call modff +; RV32I-NEXT: lw a1, 8(sp) +; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: ret +; +; RV64I-LABEL: test_modf_f32: +; RV64I: # %bb.0: +; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64I-NEXT: addi a1, sp, 4 +; RV64I-NEXT: call modff +; RV64I-NEXT: lw a1, 4(sp) +; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: ret + %result = call { float, float } @llvm.modf.f32(float %a) + ret { float, float } %result +} diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/fp128.ll b/llvm/test/CodeGen/RISCV/GlobalISel/fp128.ll index ae9b6cc..e7a3f79 100644 --- a/llvm/test/CodeGen/RISCV/GlobalISel/fp128.ll +++ b/llvm/test/CodeGen/RISCV/GlobalISel/fp128.ll @@ -911,3 +911,28 @@ define fp128 @tanh(fp128 %a) nounwind { %1 = call fp128 @llvm.tanh.f128(fp128 %a) ret fp128 %1 } + +define { fp128, fp128 } @modf(fp128 %a) nounwind { +; CHECK-LABEL: modf: +; CHECK: # %bb.0: +; CHECK-NEXT: addi sp, sp, -32 +; CHECK-NEXT: sd ra, 24(sp) # 8-byte Folded Spill +; CHECK-NEXT: sd s0, 16(sp) # 8-byte Folded Spill +; 
CHECK-NEXT: mv s0, a0 +; CHECK-NEXT: mv a0, a1 +; CHECK-NEXT: mv a1, a2 +; CHECK-NEXT: mv a2, sp +; CHECK-NEXT: call modfl +; CHECK-NEXT: ld a2, 0(sp) +; CHECK-NEXT: ld a3, 8(sp) +; CHECK-NEXT: sd a0, 0(s0) +; CHECK-NEXT: sd a1, 8(s0) +; CHECK-NEXT: sd a2, 16(s0) +; CHECK-NEXT: sd a3, 24(s0) +; CHECK-NEXT: ld ra, 24(sp) # 8-byte Folded Reload +; CHECK-NEXT: ld s0, 16(sp) # 8-byte Folded Reload +; CHECK-NEXT: addi sp, sp, 32 +; CHECK-NEXT: ret + %result = call { fp128, fp128 } @llvm.modf.f128(fp128 %a) + ret { fp128, fp128 } %result +} diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer-info-validation.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer-info-validation.mir index f1d17f9f..1361d92 100644 --- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer-info-validation.mir +++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer-info-validation.mir @@ -506,8 +506,9 @@ # DEBUG-NEXT: .. the first uncovered type index: 1, OK # DEBUG-NEXT: .. the first uncovered imm index: 0, OK # DEBUG-NEXT: G_FMODF (opcode {{[0-9]+}}): 1 type index, 0 imm indices -# DEBUG-NEXT: .. type index coverage check SKIPPED: no rules defined -# DEBUG-NEXT: .. imm index coverage check SKIPPED: no rules defined +# DEBUG-NEXT: .. opcode {{[0-9]+}} is aliased to {{[0-9]+}} +# DEBUG-NEXT: .. the first uncovered type index: 1, OK +# DEBUG-NEXT: .. the first uncovered imm index: 0, OK # DEBUG-NEXT: G_FPOW (opcode {{[0-9]+}}): 1 type index, 0 imm indices # DEBUG-NEXT: .. opcode {{[0-9]+}} is aliased to {{[0-9]+}} # DEBUG-NEXT: .. the first uncovered type index: 1, OK diff --git a/llvm/test/CodeGen/RISCV/double-intrinsics.ll b/llvm/test/CodeGen/RISCV/double-intrinsics.ll index caeb6e6..aaa08b5 100644 --- a/llvm/test/CodeGen/RISCV/double-intrinsics.ll +++ b/llvm/test/CodeGen/RISCV/double-intrinsics.ll @@ -2109,3 +2109,85 @@ define double @tanh_f64(double %a) nounwind { %1 = call double @llvm.tanh.f64(double %a) ret double %1 } + +define { double, double } @test_modf_f64(double %a) nounwind { +; RV32IFD-LABEL: test_modf_f64: +; RV32IFD: # %bb.0: +; RV32IFD-NEXT: addi sp, sp, -16 +; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IFD-NEXT: mv a0, sp +; RV32IFD-NEXT: call modf +; RV32IFD-NEXT: fld fa1, 0(sp) +; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IFD-NEXT: addi sp, sp, 16 +; RV32IFD-NEXT: ret +; +; RV64IFD-LABEL: test_modf_f64: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: addi sp, sp, -16 +; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IFD-NEXT: mv a0, sp +; RV64IFD-NEXT: call modf +; RV64IFD-NEXT: fld fa1, 0(sp) +; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IFD-NEXT: addi sp, sp, 16 +; RV64IFD-NEXT: ret +; +; RV32IZFINXZDINX-LABEL: test_modf_f64: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: mv a2, sp +; RV32IZFINXZDINX-NEXT: call modf +; RV32IZFINXZDINX-NEXT: lw a2, 0(sp) +; RV32IZFINXZDINX-NEXT: lw a3, 4(sp) +; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; +; RV64IZFINXZDINX-LABEL: test_modf_f64: +; RV64IZFINXZDINX: # %bb.0: +; RV64IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV64IZFINXZDINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IZFINXZDINX-NEXT: mv a1, sp +; RV64IZFINXZDINX-NEXT: call modf +; RV64IZFINXZDINX-NEXT: ld a1, 0(sp) +; RV64IZFINXZDINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV64IZFINXZDINX-NEXT: ret +; +; RV32I-LABEL: 
test_modf_f64: +; RV32I: # %bb.0: +; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32I-NEXT: sw s0, 8(sp) # 4-byte Folded Spill +; RV32I-NEXT: mv a3, a2 +; RV32I-NEXT: mv s0, a0 +; RV32I-NEXT: mv a2, sp +; RV32I-NEXT: mv a0, a1 +; RV32I-NEXT: mv a1, a3 +; RV32I-NEXT: call modf +; RV32I-NEXT: lw a2, 0(sp) +; RV32I-NEXT: lw a3, 4(sp) +; RV32I-NEXT: sw a0, 0(s0) +; RV32I-NEXT: sw a1, 4(s0) +; RV32I-NEXT: sw a2, 8(s0) +; RV32I-NEXT: sw a3, 12(s0) +; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload +; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: ret +; +; RV64I-LABEL: test_modf_f64: +; RV64I: # %bb.0: +; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64I-NEXT: mv a1, sp +; RV64I-NEXT: call modf +; RV64I-NEXT: ld a1, 0(sp) +; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: ret + %result = call { double, double } @llvm.modf.f64(double %a) + ret { double, double } %result +} diff --git a/llvm/test/CodeGen/RISCV/float-intrinsics.ll b/llvm/test/CodeGen/RISCV/float-intrinsics.ll index b1230ae..5f673ac 100644 --- a/llvm/test/CodeGen/RISCV/float-intrinsics.ll +++ b/llvm/test/CodeGen/RISCV/float-intrinsics.ll @@ -3050,3 +3050,84 @@ define float @tanh_f32(float %a) nounwind { %1 = call float @llvm.tanh.f32(float %a) ret float %1 } + +define { float, float } @test_modf_f32(float %a) nounwind { +; RV32IF-LABEL: test_modf_f32: +; RV32IF: # %bb.0: +; RV32IF-NEXT: addi sp, sp, -16 +; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IF-NEXT: addi a0, sp, 8 +; RV32IF-NEXT: call modff +; RV32IF-NEXT: flw fa1, 8(sp) +; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IF-NEXT: addi sp, sp, 16 +; RV32IF-NEXT: ret +; +; RV32IZFINX-LABEL: test_modf_f32: +; RV32IZFINX: # %bb.0: +; RV32IZFINX-NEXT: addi sp, sp, -16 +; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZFINX-NEXT: addi a1, sp, 8 +; RV32IZFINX-NEXT: call modff +; RV32IZFINX-NEXT: lw a1, 8(sp) +; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZFINX-NEXT: addi sp, sp, 16 +; RV32IZFINX-NEXT: ret +; +; RV64IF-LABEL: test_modf_f32: +; RV64IF: # %bb.0: +; RV64IF-NEXT: addi sp, sp, -16 +; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IF-NEXT: addi a0, sp, 4 +; RV64IF-NEXT: call modff +; RV64IF-NEXT: flw fa1, 4(sp) +; RV64IF-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IF-NEXT: addi sp, sp, 16 +; RV64IF-NEXT: ret +; +; RV64IZFINX-LABEL: test_modf_f32: +; RV64IZFINX: # %bb.0: +; RV64IZFINX-NEXT: addi sp, sp, -16 +; RV64IZFINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IZFINX-NEXT: addi a1, sp, 4 +; RV64IZFINX-NEXT: call modff +; RV64IZFINX-NEXT: lw a1, 4(sp) +; RV64IZFINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IZFINX-NEXT: addi sp, sp, 16 +; RV64IZFINX-NEXT: ret +; +; RV64IFD-LABEL: test_modf_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: addi sp, sp, -16 +; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IFD-NEXT: addi a0, sp, 4 +; RV64IFD-NEXT: call modff +; RV64IFD-NEXT: flw fa1, 4(sp) +; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IFD-NEXT: addi sp, sp, 16 +; RV64IFD-NEXT: ret +; +; RV32I-LABEL: test_modf_f32: +; RV32I: # %bb.0: +; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32I-NEXT: addi a1, sp, 8 +; RV32I-NEXT: call modff +; RV32I-NEXT: lw a1, 8(sp) +; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: ret +; +; 
RV64I-LABEL: test_modf_f32: +; RV64I: # %bb.0: +; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64I-NEXT: addi a1, sp, 4 +; RV64I-NEXT: call modff +; RV64I-NEXT: lw a1, 4(sp) +; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: ret + %result = call { float, float } @llvm.modf.f32(float %a) + ret { float, float } %result +} diff --git a/llvm/test/CodeGen/RISCV/fp128.ll b/llvm/test/CodeGen/RISCV/fp128.ll index a8e26f7..704cb425 100644 --- a/llvm/test/CodeGen/RISCV/fp128.ll +++ b/llvm/test/CodeGen/RISCV/fp128.ll @@ -189,3 +189,46 @@ define fp128 @fminimum(fp128 %x, fp128 %y) { %a = call fp128 @llvm.minimum.fp128(fp128 %x, fp128 %y) ret fp128 %a } + +define { fp128, fp128 } @modf(fp128 %a) nounwind { +; RV32I-LABEL: modf: +; RV32I: # %bb.0: +; RV32I-NEXT: addi sp, sp, -64 +; RV32I-NEXT: sw ra, 60(sp) # 4-byte Folded Spill +; RV32I-NEXT: sw s0, 56(sp) # 4-byte Folded Spill +; RV32I-NEXT: lw a3, 0(a1) +; RV32I-NEXT: lw a4, 4(a1) +; RV32I-NEXT: lw a5, 8(a1) +; RV32I-NEXT: lw a6, 12(a1) +; RV32I-NEXT: mv s0, a0 +; RV32I-NEXT: addi a0, sp, 24 +; RV32I-NEXT: addi a1, sp, 8 +; RV32I-NEXT: addi a2, sp, 40 +; RV32I-NEXT: sw a3, 8(sp) +; RV32I-NEXT: sw a4, 12(sp) +; RV32I-NEXT: sw a5, 16(sp) +; RV32I-NEXT: sw a6, 20(sp) +; RV32I-NEXT: call modfl +; RV32I-NEXT: lw a0, 24(sp) +; RV32I-NEXT: lw a1, 28(sp) +; RV32I-NEXT: lw a2, 32(sp) +; RV32I-NEXT: lw a3, 36(sp) +; RV32I-NEXT: lw a4, 40(sp) +; RV32I-NEXT: lw a5, 44(sp) +; RV32I-NEXT: lw a6, 48(sp) +; RV32I-NEXT: lw a7, 52(sp) +; RV32I-NEXT: sw a4, 16(s0) +; RV32I-NEXT: sw a5, 20(s0) +; RV32I-NEXT: sw a6, 24(s0) +; RV32I-NEXT: sw a7, 28(s0) +; RV32I-NEXT: sw a0, 0(s0) +; RV32I-NEXT: sw a1, 4(s0) +; RV32I-NEXT: sw a2, 8(s0) +; RV32I-NEXT: sw a3, 12(s0) +; RV32I-NEXT: lw ra, 60(sp) # 4-byte Folded Reload +; RV32I-NEXT: lw s0, 56(sp) # 4-byte Folded Reload +; RV32I-NEXT: addi sp, sp, 64 +; RV32I-NEXT: ret + %result = call { fp128, fp128 } @llvm.modf.f128(fp128 %a) + ret { fp128, fp128 } %result +} diff --git a/llvm/test/CodeGen/RISCV/half-intrinsics.ll b/llvm/test/CodeGen/RISCV/half-intrinsics.ll index e16d788..847054d 100644 --- a/llvm/test/CodeGen/RISCV/half-intrinsics.ll +++ b/llvm/test/CodeGen/RISCV/half-intrinsics.ll @@ -4417,3 +4417,161 @@ define half @tanh_f16(half %a) nounwind { %1 = call half @llvm.tanh.f16(half %a) ret half %1 } + +define { half, half } @test_modf_f16(half %a) nounwind { +; RV32IZFH-LABEL: test_modf_f16: +; RV32IZFH: # %bb.0: +; RV32IZFH-NEXT: addi sp, sp, -16 +; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZFH-NEXT: fcvt.s.h fa0, fa0 +; RV32IZFH-NEXT: addi a0, sp, 8 +; RV32IZFH-NEXT: call modff +; RV32IZFH-NEXT: flw fa5, 8(sp) +; RV32IZFH-NEXT: fcvt.h.s fa0, fa0 +; RV32IZFH-NEXT: fcvt.h.s fa1, fa5 +; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZFH-NEXT: addi sp, sp, 16 +; RV32IZFH-NEXT: ret +; +; RV64IZFH-LABEL: test_modf_f16: +; RV64IZFH: # %bb.0: +; RV64IZFH-NEXT: addi sp, sp, -16 +; RV64IZFH-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IZFH-NEXT: fcvt.s.h fa0, fa0 +; RV64IZFH-NEXT: addi a0, sp, 4 +; RV64IZFH-NEXT: call modff +; RV64IZFH-NEXT: flw fa5, 4(sp) +; RV64IZFH-NEXT: fcvt.h.s fa0, fa0 +; RV64IZFH-NEXT: fcvt.h.s fa1, fa5 +; RV64IZFH-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IZFH-NEXT: addi sp, sp, 16 +; RV64IZFH-NEXT: ret +; +; RV32IZHINX-LABEL: test_modf_f16: +; RV32IZHINX: # %bb.0: +; RV32IZHINX-NEXT: addi sp, sp, -16 +; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINX-NEXT: fcvt.s.h 
a0, a0 +; RV32IZHINX-NEXT: addi a1, sp, 8 +; RV32IZHINX-NEXT: call modff +; RV32IZHINX-NEXT: lw a1, 8(sp) +; RV32IZHINX-NEXT: fcvt.h.s a0, a0 +; RV32IZHINX-NEXT: fcvt.h.s a1, a1 +; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZHINX-NEXT: addi sp, sp, 16 +; RV32IZHINX-NEXT: ret +; +; RV64IZHINX-LABEL: test_modf_f16: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: addi sp, sp, -16 +; RV64IZHINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IZHINX-NEXT: fcvt.s.h a0, a0 +; RV64IZHINX-NEXT: addi a1, sp, 4 +; RV64IZHINX-NEXT: call modff +; RV64IZHINX-NEXT: lw a1, 4(sp) +; RV64IZHINX-NEXT: fcvt.h.s a0, a0 +; RV64IZHINX-NEXT: fcvt.h.s a1, a1 +; RV64IZHINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IZHINX-NEXT: addi sp, sp, 16 +; RV64IZHINX-NEXT: ret +; +; RV32I-LABEL: test_modf_f16: +; RV32I: # %bb.0: +; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32I-NEXT: sw s0, 8(sp) # 4-byte Folded Spill +; RV32I-NEXT: slli a0, a0, 16 +; RV32I-NEXT: srli a0, a0, 16 +; RV32I-NEXT: call __extendhfsf2 +; RV32I-NEXT: addi a1, sp, 4 +; RV32I-NEXT: call modff +; RV32I-NEXT: call __truncsfhf2 +; RV32I-NEXT: mv s0, a0 +; RV32I-NEXT: lw a0, 4(sp) +; RV32I-NEXT: call __truncsfhf2 +; RV32I-NEXT: mv a1, a0 +; RV32I-NEXT: mv a0, s0 +; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload +; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: ret +; +; RV64I-LABEL: test_modf_f16: +; RV64I: # %bb.0: +; RV64I-NEXT: addi sp, sp, -32 +; RV64I-NEXT: sd ra, 24(sp) # 8-byte Folded Spill +; RV64I-NEXT: sd s0, 16(sp) # 8-byte Folded Spill +; RV64I-NEXT: slli a0, a0, 48 +; RV64I-NEXT: srli a0, a0, 48 +; RV64I-NEXT: call __extendhfsf2 +; RV64I-NEXT: addi a1, sp, 12 +; RV64I-NEXT: call modff +; RV64I-NEXT: call __truncsfhf2 +; RV64I-NEXT: mv s0, a0 +; RV64I-NEXT: lw a0, 12(sp) +; RV64I-NEXT: call __truncsfhf2 +; RV64I-NEXT: mv a1, a0 +; RV64I-NEXT: mv a0, s0 +; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload +; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload +; RV64I-NEXT: addi sp, sp, 32 +; RV64I-NEXT: ret +; +; RV32IZFHMIN-LABEL: test_modf_f16: +; RV32IZFHMIN: # %bb.0: +; RV32IZFHMIN-NEXT: addi sp, sp, -16 +; RV32IZFHMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZFHMIN-NEXT: fcvt.s.h fa0, fa0 +; RV32IZFHMIN-NEXT: addi a0, sp, 8 +; RV32IZFHMIN-NEXT: call modff +; RV32IZFHMIN-NEXT: flw fa5, 8(sp) +; RV32IZFHMIN-NEXT: fcvt.h.s fa0, fa0 +; RV32IZFHMIN-NEXT: fcvt.h.s fa1, fa5 +; RV32IZFHMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZFHMIN-NEXT: addi sp, sp, 16 +; RV32IZFHMIN-NEXT: ret +; +; RV64IZFHMIN-LABEL: test_modf_f16: +; RV64IZFHMIN: # %bb.0: +; RV64IZFHMIN-NEXT: addi sp, sp, -16 +; RV64IZFHMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IZFHMIN-NEXT: fcvt.s.h fa0, fa0 +; RV64IZFHMIN-NEXT: addi a0, sp, 4 +; RV64IZFHMIN-NEXT: call modff +; RV64IZFHMIN-NEXT: flw fa5, 4(sp) +; RV64IZFHMIN-NEXT: fcvt.h.s fa0, fa0 +; RV64IZFHMIN-NEXT: fcvt.h.s fa1, fa5 +; RV64IZFHMIN-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IZFHMIN-NEXT: addi sp, sp, 16 +; RV64IZFHMIN-NEXT: ret +; +; RV32IZHINXMIN-LABEL: test_modf_f16: +; RV32IZHINXMIN: # %bb.0: +; RV32IZHINXMIN-NEXT: addi sp, sp, -16 +; RV32IZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV32IZHINXMIN-NEXT: addi a1, sp, 8 +; RV32IZHINXMIN-NEXT: call modff +; RV32IZHINXMIN-NEXT: lw a1, 8(sp) +; RV32IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV32IZHINXMIN-NEXT: fcvt.h.s a1, a1 +; RV32IZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; 
RV32IZHINXMIN-NEXT: addi sp, sp, 16 +; RV32IZHINXMIN-NEXT: ret +; +; RV64IZHINXMIN-LABEL: test_modf_f16: +; RV64IZHINXMIN: # %bb.0: +; RV64IZHINXMIN-NEXT: addi sp, sp, -16 +; RV64IZHINXMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: addi a1, sp, 4 +; RV64IZHINXMIN-NEXT: call modff +; RV64IZHINXMIN-NEXT: lw a1, 4(sp) +; RV64IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV64IZHINXMIN-NEXT: fcvt.h.s a1, a1 +; RV64IZHINXMIN-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IZHINXMIN-NEXT: addi sp, sp, 16 +; RV64IZHINXMIN-NEXT: ret + %result = call { half, half } @llvm.modf.f16(half %a) + ret { half, half } %result +} diff --git a/llvm/test/CodeGen/SPARC/2011-01-19-DelaySlot.ll b/llvm/test/CodeGen/SPARC/2011-01-19-DelaySlot.ll index 9ccd4f1..767ef7e 100644 --- a/llvm/test/CodeGen/SPARC/2011-01-19-DelaySlot.ll +++ b/llvm/test/CodeGen/SPARC/2011-01-19-DelaySlot.ll @@ -184,4 +184,29 @@ entry: ret i32 %2 } +define i32 @test_generic_inst(i32 %arg) #0 { +;CHECK-LABEL: test_generic_inst: +;CHECK: ! fake_use: {{.*}} +;CHECK: bne {{.*}} +;CHECK-NEXT: nop + %bar1 = call i32 @bar(i32 %arg) + %even = and i32 %bar1, 1 + %cmp = icmp eq i32 %even, 0 + ; This shouldn't get reordered into a delay slot + call void (...) @llvm.fake.use(i32 %arg) + br i1 %cmp, label %true, label %false +true: + %bar2 = call i32 @bar(i32 %bar1) + br label %cont + +false: + %inc = add nsw i32 %bar1, 1 + br label %cont + +cont: + %ret = phi i32 [ %bar2, %true ], [ %inc, %false ] + ret i32 %ret +} + +declare void @llvm.fake.use(...) attributes #0 = { nounwind "disable-tail-calls"="true" } diff --git a/llvm/test/CodeGen/X86/any_extend_vector_inreg_of_broadcast.ll b/llvm/test/CodeGen/X86/any_extend_vector_inreg_of_broadcast.ll index dec829f..44cf4e8 100644 --- a/llvm/test/CodeGen/X86/any_extend_vector_inreg_of_broadcast.ll +++ b/llvm/test/CodeGen/X86/any_extend_vector_inreg_of_broadcast.ll @@ -911,7 +911,7 @@ define void @vec128_i32_widen_to_i64_factor2_broadcast_to_v2i64_factor2(ptr %in. ; SSE2-NEXT: paddb (%rsi), %xmm0 ; SSE2-NEXT: paddb 16(%rsi), %xmm1 ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3] -; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1] +; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0] ; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] ; SSE2-NEXT: paddb (%rdx), %xmm0 ; SSE2-NEXT: movdqa %xmm0, (%rcx) @@ -1898,7 +1898,7 @@ define void @vec256_i32_widen_to_i64_factor2_broadcast_to_v4i64_factor4(ptr %in. ; SSE2-NEXT: paddb (%rsi), %xmm0 ; SSE2-NEXT: paddb 32(%rsi), %xmm1 ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3] -; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1] +; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0] ; SSE2-NEXT: movdqa %xmm0, %xmm3 ; SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1] ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,3,2,3] @@ -4155,7 +4155,7 @@ define void @vec384_i32_widen_to_i64_factor2_broadcast_to_v6i64_factor6(ptr %in. 
; SSE2-NEXT: paddb (%rsi), %xmm0 ; SSE2-NEXT: paddb 48(%rsi), %xmm1 ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3] -; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[0,0,1,1] +; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[0,0,0,0] ; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1] ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,1] ; SSE2-NEXT: paddb (%rdx), %xmm2 diff --git a/llvm/test/CodeGen/X86/any_extend_vector_inreg_of_broadcast_from_memory.ll b/llvm/test/CodeGen/X86/any_extend_vector_inreg_of_broadcast_from_memory.ll index 3d4cddb..89b5c33 100644 --- a/llvm/test/CodeGen/X86/any_extend_vector_inreg_of_broadcast_from_memory.ll +++ b/llvm/test/CodeGen/X86/any_extend_vector_inreg_of_broadcast_from_memory.ll @@ -769,7 +769,7 @@ define void @vec128_i32_widen_to_i64_factor2_broadcast_to_v2i64_factor2(ptr %in. ; SSE2-LABEL: vec128_i32_widen_to_i64_factor2_broadcast_to_v2i64_factor2: ; SSE2: # %bb.0: ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = mem[1,3,2,3] -; SSE2-NEXT: pshufd {{.*#+}} xmm1 = mem[0,0,1,1] +; SSE2-NEXT: pshufd {{.*#+}} xmm1 = mem[0,0,0,0] ; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] ; SSE2-NEXT: paddb (%rsi), %xmm1 ; SSE2-NEXT: movdqa %xmm1, (%rdx) @@ -1522,7 +1522,7 @@ define void @vec256_i32_widen_to_i64_factor2_broadcast_to_v4i64_factor4(ptr %in. ; SSE2-LABEL: vec256_i32_widen_to_i64_factor2_broadcast_to_v4i64_factor4: ; SSE2: # %bb.0: ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = mem[1,3,2,3] -; SSE2-NEXT: pshufd {{.*#+}} xmm1 = mem[0,0,1,1] +; SSE2-NEXT: pshufd {{.*#+}} xmm1 = mem[0,0,0,0] ; SSE2-NEXT: movdqa %xmm1, %xmm2 ; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1] ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = mem[1,3,2,3] @@ -3335,7 +3335,7 @@ define void @vec384_i32_widen_to_i64_factor2_broadcast_to_v6i64_factor6(ptr %in. 
; SSE2: # %bb.0: ; SSE2-NEXT: movdqa (%rdi), %xmm0 ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = mem[1,3,2,3] -; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[0,0,1,1] +; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[0,0,0,0] ; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1] ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,1] ; SSE2-NEXT: paddb (%rsi), %xmm2 diff --git a/llvm/test/CodeGen/X86/avx2-vector-shifts.ll b/llvm/test/CodeGen/X86/avx2-vector-shifts.ll index 983c69d..95c2eda 100644 --- a/llvm/test/CodeGen/X86/avx2-vector-shifts.ll +++ b/llvm/test/CodeGen/X86/avx2-vector-shifts.ll @@ -441,10 +441,10 @@ define <32 x i8> @ashr_32i8(<32 x i8> %r, <32 x i8> %a) nounwind { ; CHECK-NEXT: vpsraw $4, %ymm3, %ymm4 ; CHECK-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm3 ; CHECK-NEXT: vpsraw $2, %ymm3, %ymm4 -; CHECK-NEXT: vpaddw %ymm2, %ymm2, %ymm2 -; CHECK-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm3 +; CHECK-NEXT: vpaddw %ymm2, %ymm2, %ymm5 +; CHECK-NEXT: vpblendvb %ymm5, %ymm4, %ymm3, %ymm3 ; CHECK-NEXT: vpsraw $1, %ymm3, %ymm4 -; CHECK-NEXT: vpaddw %ymm2, %ymm2, %ymm2 +; CHECK-NEXT: vpsllw $2, %ymm2, %ymm2 ; CHECK-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm2 ; CHECK-NEXT: vpsrlw $8, %ymm2, %ymm2 ; CHECK-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23] @@ -452,10 +452,10 @@ define <32 x i8> @ashr_32i8(<32 x i8> %r, <32 x i8> %a) nounwind { ; CHECK-NEXT: vpsraw $4, %ymm0, %ymm3 ; CHECK-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0 ; CHECK-NEXT: vpsraw $2, %ymm0, %ymm3 -; CHECK-NEXT: vpaddw %ymm1, %ymm1, %ymm1 -; CHECK-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0 +; CHECK-NEXT: vpaddw %ymm1, %ymm1, %ymm4 +; CHECK-NEXT: vpblendvb %ymm4, %ymm3, %ymm0, %ymm0 ; CHECK-NEXT: vpsraw $1, %ymm0, %ymm3 -; CHECK-NEXT: vpaddw %ymm1, %ymm1, %ymm1 +; CHECK-NEXT: vpsllw $2, %ymm1, %ymm1 ; CHECK-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0 ; CHECK-NEXT: vpsrlw $8, %ymm0, %ymm0 ; CHECK-NEXT: vpackuswb %ymm2, %ymm0, %ymm0 diff --git a/llvm/test/CodeGen/X86/gfni-shifts.ll b/llvm/test/CodeGen/X86/gfni-shifts.ll index cd16651..feac3dc 100644 --- a/llvm/test/CodeGen/X86/gfni-shifts.ll +++ b/llvm/test/CodeGen/X86/gfni-shifts.ll @@ -166,10 +166,10 @@ define <16 x i8> @var_ashr_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind { ; GFNIAVX1OR2-NEXT: vpsraw $4, %xmm3, %xmm4 ; GFNIAVX1OR2-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm3 ; GFNIAVX1OR2-NEXT: vpsraw $2, %xmm3, %xmm4 -; GFNIAVX1OR2-NEXT: vpaddw %xmm2, %xmm2, %xmm2 -; GFNIAVX1OR2-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm3 +; GFNIAVX1OR2-NEXT: vpaddw %xmm2, %xmm2, %xmm5 +; GFNIAVX1OR2-NEXT: vpblendvb %xmm5, %xmm4, %xmm3, %xmm3 ; GFNIAVX1OR2-NEXT: vpsraw $1, %xmm3, %xmm4 -; GFNIAVX1OR2-NEXT: vpaddw %xmm2, %xmm2, %xmm2 +; GFNIAVX1OR2-NEXT: vpsllw $2, %xmm2, %xmm2 ; GFNIAVX1OR2-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm2 ; GFNIAVX1OR2-NEXT: vpsrlw $8, %xmm2, %xmm2 ; GFNIAVX1OR2-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] @@ -177,10 +177,10 @@ define <16 x i8> @var_ashr_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind { ; GFNIAVX1OR2-NEXT: vpsraw $4, %xmm0, %xmm3 ; GFNIAVX1OR2-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0 ; GFNIAVX1OR2-NEXT: vpsraw $2, %xmm0, %xmm3 -; GFNIAVX1OR2-NEXT: vpaddw %xmm1, %xmm1, %xmm1 -; GFNIAVX1OR2-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0 +; GFNIAVX1OR2-NEXT: vpaddw %xmm1, %xmm1, %xmm4 +; GFNIAVX1OR2-NEXT: vpblendvb %xmm4, %xmm3, %xmm0, %xmm0 ; GFNIAVX1OR2-NEXT: vpsraw $1, %xmm0, %xmm3 -; GFNIAVX1OR2-NEXT: vpaddw %xmm1, %xmm1, %xmm1 +; GFNIAVX1OR2-NEXT: vpsllw $2, %xmm1, %xmm1 ; 
GFNIAVX1OR2-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0 ; GFNIAVX1OR2-NEXT: vpsrlw $8, %xmm0, %xmm0 ; GFNIAVX1OR2-NEXT: vpackuswb %xmm2, %xmm0, %xmm0 @@ -896,10 +896,10 @@ define <32 x i8> @var_ashr_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind { ; GFNIAVX1-NEXT: vpsraw $4, %xmm5, %xmm6 ; GFNIAVX1-NEXT: vpblendvb %xmm3, %xmm6, %xmm5, %xmm5 ; GFNIAVX1-NEXT: vpsraw $2, %xmm5, %xmm6 -; GFNIAVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3 -; GFNIAVX1-NEXT: vpblendvb %xmm3, %xmm6, %xmm5, %xmm5 +; GFNIAVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm7 +; GFNIAVX1-NEXT: vpblendvb %xmm7, %xmm6, %xmm5, %xmm5 ; GFNIAVX1-NEXT: vpsraw $1, %xmm5, %xmm6 -; GFNIAVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3 +; GFNIAVX1-NEXT: vpsllw $2, %xmm3, %xmm3 ; GFNIAVX1-NEXT: vpblendvb %xmm3, %xmm6, %xmm5, %xmm3 ; GFNIAVX1-NEXT: vpsrlw $8, %xmm3, %xmm3 ; GFNIAVX1-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] @@ -907,10 +907,10 @@ define <32 x i8> @var_ashr_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind { ; GFNIAVX1-NEXT: vpsraw $4, %xmm4, %xmm5 ; GFNIAVX1-NEXT: vpblendvb %xmm2, %xmm5, %xmm4, %xmm4 ; GFNIAVX1-NEXT: vpsraw $2, %xmm4, %xmm5 -; GFNIAVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm2 -; GFNIAVX1-NEXT: vpblendvb %xmm2, %xmm5, %xmm4, %xmm4 +; GFNIAVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm6 +; GFNIAVX1-NEXT: vpblendvb %xmm6, %xmm5, %xmm4, %xmm4 ; GFNIAVX1-NEXT: vpsraw $1, %xmm4, %xmm5 -; GFNIAVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm2 +; GFNIAVX1-NEXT: vpsllw $2, %xmm2, %xmm2 ; GFNIAVX1-NEXT: vpblendvb %xmm2, %xmm5, %xmm4, %xmm2 ; GFNIAVX1-NEXT: vpsrlw $8, %xmm2, %xmm2 ; GFNIAVX1-NEXT: vpackuswb %xmm3, %xmm2, %xmm2 @@ -920,10 +920,10 @@ define <32 x i8> @var_ashr_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind { ; GFNIAVX1-NEXT: vpsraw $4, %xmm4, %xmm5 ; GFNIAVX1-NEXT: vpblendvb %xmm3, %xmm5, %xmm4, %xmm4 ; GFNIAVX1-NEXT: vpsraw $2, %xmm4, %xmm5 -; GFNIAVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3 -; GFNIAVX1-NEXT: vpblendvb %xmm3, %xmm5, %xmm4, %xmm4 +; GFNIAVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm6 +; GFNIAVX1-NEXT: vpblendvb %xmm6, %xmm5, %xmm4, %xmm4 ; GFNIAVX1-NEXT: vpsraw $1, %xmm4, %xmm5 -; GFNIAVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3 +; GFNIAVX1-NEXT: vpsllw $2, %xmm3, %xmm3 ; GFNIAVX1-NEXT: vpblendvb %xmm3, %xmm5, %xmm4, %xmm3 ; GFNIAVX1-NEXT: vpsrlw $8, %xmm3, %xmm3 ; GFNIAVX1-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] @@ -931,10 +931,10 @@ define <32 x i8> @var_ashr_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind { ; GFNIAVX1-NEXT: vpsraw $4, %xmm0, %xmm4 ; GFNIAVX1-NEXT: vpblendvb %xmm1, %xmm4, %xmm0, %xmm0 ; GFNIAVX1-NEXT: vpsraw $2, %xmm0, %xmm4 -; GFNIAVX1-NEXT: vpaddw %xmm1, %xmm1, %xmm1 -; GFNIAVX1-NEXT: vpblendvb %xmm1, %xmm4, %xmm0, %xmm0 +; GFNIAVX1-NEXT: vpaddw %xmm1, %xmm1, %xmm5 +; GFNIAVX1-NEXT: vpblendvb %xmm5, %xmm4, %xmm0, %xmm0 ; GFNIAVX1-NEXT: vpsraw $1, %xmm0, %xmm4 -; GFNIAVX1-NEXT: vpaddw %xmm1, %xmm1, %xmm1 +; GFNIAVX1-NEXT: vpsllw $2, %xmm1, %xmm1 ; GFNIAVX1-NEXT: vpblendvb %xmm1, %xmm4, %xmm0, %xmm0 ; GFNIAVX1-NEXT: vpsrlw $8, %xmm0, %xmm0 ; GFNIAVX1-NEXT: vpackuswb %xmm3, %xmm0, %xmm0 @@ -949,10 +949,10 @@ define <32 x i8> @var_ashr_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind { ; GFNIAVX2-NEXT: vpsraw $4, %ymm3, %ymm4 ; GFNIAVX2-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm3 ; GFNIAVX2-NEXT: vpsraw $2, %ymm3, %ymm4 -; GFNIAVX2-NEXT: vpaddw %ymm2, %ymm2, %ymm2 -; GFNIAVX2-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm3 +; GFNIAVX2-NEXT: vpaddw %ymm2, %ymm2, %ymm5 +; GFNIAVX2-NEXT: vpblendvb %ymm5, %ymm4, %ymm3, %ymm3 ; GFNIAVX2-NEXT: vpsraw $1, %ymm3, %ymm4 -; GFNIAVX2-NEXT: vpaddw %ymm2, %ymm2, %ymm2 +; GFNIAVX2-NEXT: 
vpsllw $2, %ymm2, %ymm2 ; GFNIAVX2-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm2 ; GFNIAVX2-NEXT: vpsrlw $8, %ymm2, %ymm2 ; GFNIAVX2-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23] @@ -960,10 +960,10 @@ define <32 x i8> @var_ashr_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind { ; GFNIAVX2-NEXT: vpsraw $4, %ymm0, %ymm3 ; GFNIAVX2-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0 ; GFNIAVX2-NEXT: vpsraw $2, %ymm0, %ymm3 -; GFNIAVX2-NEXT: vpaddw %ymm1, %ymm1, %ymm1 -; GFNIAVX2-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0 +; GFNIAVX2-NEXT: vpaddw %ymm1, %ymm1, %ymm4 +; GFNIAVX2-NEXT: vpblendvb %ymm4, %ymm3, %ymm0, %ymm0 ; GFNIAVX2-NEXT: vpsraw $1, %ymm0, %ymm3 -; GFNIAVX2-NEXT: vpaddw %ymm1, %ymm1, %ymm1 +; GFNIAVX2-NEXT: vpsllw $2, %ymm1, %ymm1 ; GFNIAVX2-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0 ; GFNIAVX2-NEXT: vpsrlw $8, %ymm0, %ymm0 ; GFNIAVX2-NEXT: vpackuswb %ymm2, %ymm0, %ymm0 @@ -977,10 +977,10 @@ define <32 x i8> @var_ashr_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind { ; GFNIAVX512VL-NEXT: vpsraw $4, %ymm3, %ymm4 ; GFNIAVX512VL-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm3 ; GFNIAVX512VL-NEXT: vpsraw $2, %ymm3, %ymm4 -; GFNIAVX512VL-NEXT: vpaddw %ymm2, %ymm2, %ymm2 -; GFNIAVX512VL-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm3 +; GFNIAVX512VL-NEXT: vpaddw %ymm2, %ymm2, %ymm5 +; GFNIAVX512VL-NEXT: vpblendvb %ymm5, %ymm4, %ymm3, %ymm3 ; GFNIAVX512VL-NEXT: vpsraw $1, %ymm3, %ymm4 -; GFNIAVX512VL-NEXT: vpaddw %ymm2, %ymm2, %ymm2 +; GFNIAVX512VL-NEXT: vpsllw $2, %ymm2, %ymm2 ; GFNIAVX512VL-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm2 ; GFNIAVX512VL-NEXT: vpsrlw $8, %ymm2, %ymm2 ; GFNIAVX512VL-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23] @@ -988,10 +988,10 @@ define <32 x i8> @var_ashr_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind { ; GFNIAVX512VL-NEXT: vpsraw $4, %ymm0, %ymm3 ; GFNIAVX512VL-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0 ; GFNIAVX512VL-NEXT: vpsraw $2, %ymm0, %ymm3 -; GFNIAVX512VL-NEXT: vpaddw %ymm1, %ymm1, %ymm1 -; GFNIAVX512VL-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0 +; GFNIAVX512VL-NEXT: vpaddw %ymm1, %ymm1, %ymm4 +; GFNIAVX512VL-NEXT: vpblendvb %ymm4, %ymm3, %ymm0, %ymm0 ; GFNIAVX512VL-NEXT: vpsraw $1, %ymm0, %ymm3 -; GFNIAVX512VL-NEXT: vpaddw %ymm1, %ymm1, %ymm1 +; GFNIAVX512VL-NEXT: vpsllw $2, %ymm1, %ymm1 ; GFNIAVX512VL-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0 ; GFNIAVX512VL-NEXT: vpsrlw $8, %ymm0, %ymm0 ; GFNIAVX512VL-NEXT: vpackuswb %ymm2, %ymm0, %ymm0 @@ -2027,10 +2027,10 @@ define <64 x i8> @var_ashr_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind { ; GFNIAVX1-NEXT: vpsraw $4, %xmm7, %xmm8 ; GFNIAVX1-NEXT: vpblendvb %xmm5, %xmm8, %xmm7, %xmm7 ; GFNIAVX1-NEXT: vpsraw $2, %xmm7, %xmm8 -; GFNIAVX1-NEXT: vpaddw %xmm5, %xmm5, %xmm5 -; GFNIAVX1-NEXT: vpblendvb %xmm5, %xmm8, %xmm7, %xmm7 +; GFNIAVX1-NEXT: vpaddw %xmm5, %xmm5, %xmm9 +; GFNIAVX1-NEXT: vpblendvb %xmm9, %xmm8, %xmm7, %xmm7 ; GFNIAVX1-NEXT: vpsraw $1, %xmm7, %xmm8 -; GFNIAVX1-NEXT: vpaddw %xmm5, %xmm5, %xmm5 +; GFNIAVX1-NEXT: vpsllw $2, %xmm5, %xmm5 ; GFNIAVX1-NEXT: vpblendvb %xmm5, %xmm8, %xmm7, %xmm5 ; GFNIAVX1-NEXT: vpsrlw $8, %xmm5, %xmm5 ; GFNIAVX1-NEXT: vpunpcklbw {{.*#+}} xmm4 = xmm4[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] @@ -2038,10 +2038,10 @@ define <64 x i8> @var_ashr_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind { ; GFNIAVX1-NEXT: vpsraw $4, %xmm6, %xmm7 ; GFNIAVX1-NEXT: vpblendvb %xmm4, %xmm7, %xmm6, %xmm6 ; GFNIAVX1-NEXT: vpsraw $2, %xmm6, %xmm7 -; GFNIAVX1-NEXT: vpaddw %xmm4, %xmm4, %xmm4 -; 
GFNIAVX1-NEXT: vpblendvb %xmm4, %xmm7, %xmm6, %xmm6 +; GFNIAVX1-NEXT: vpaddw %xmm4, %xmm4, %xmm8 +; GFNIAVX1-NEXT: vpblendvb %xmm8, %xmm7, %xmm6, %xmm6 ; GFNIAVX1-NEXT: vpsraw $1, %xmm6, %xmm7 -; GFNIAVX1-NEXT: vpaddw %xmm4, %xmm4, %xmm4 +; GFNIAVX1-NEXT: vpsllw $2, %xmm4, %xmm4 ; GFNIAVX1-NEXT: vpblendvb %xmm4, %xmm7, %xmm6, %xmm4 ; GFNIAVX1-NEXT: vpsrlw $8, %xmm4, %xmm4 ; GFNIAVX1-NEXT: vpackuswb %xmm5, %xmm4, %xmm4 @@ -2051,10 +2051,10 @@ define <64 x i8> @var_ashr_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind { ; GFNIAVX1-NEXT: vpsraw $4, %xmm6, %xmm7 ; GFNIAVX1-NEXT: vpblendvb %xmm5, %xmm7, %xmm6, %xmm6 ; GFNIAVX1-NEXT: vpsraw $2, %xmm6, %xmm7 -; GFNIAVX1-NEXT: vpaddw %xmm5, %xmm5, %xmm5 -; GFNIAVX1-NEXT: vpblendvb %xmm5, %xmm7, %xmm6, %xmm6 +; GFNIAVX1-NEXT: vpaddw %xmm5, %xmm5, %xmm8 +; GFNIAVX1-NEXT: vpblendvb %xmm8, %xmm7, %xmm6, %xmm6 ; GFNIAVX1-NEXT: vpsraw $1, %xmm6, %xmm7 -; GFNIAVX1-NEXT: vpaddw %xmm5, %xmm5, %xmm5 +; GFNIAVX1-NEXT: vpsllw $2, %xmm5, %xmm5 ; GFNIAVX1-NEXT: vpblendvb %xmm5, %xmm7, %xmm6, %xmm5 ; GFNIAVX1-NEXT: vpsrlw $8, %xmm5, %xmm5 ; GFNIAVX1-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] @@ -2062,10 +2062,10 @@ define <64 x i8> @var_ashr_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind { ; GFNIAVX1-NEXT: vpsraw $4, %xmm0, %xmm6 ; GFNIAVX1-NEXT: vpblendvb %xmm2, %xmm6, %xmm0, %xmm0 ; GFNIAVX1-NEXT: vpsraw $2, %xmm0, %xmm6 -; GFNIAVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm2 -; GFNIAVX1-NEXT: vpblendvb %xmm2, %xmm6, %xmm0, %xmm0 +; GFNIAVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm7 +; GFNIAVX1-NEXT: vpblendvb %xmm7, %xmm6, %xmm0, %xmm0 ; GFNIAVX1-NEXT: vpsraw $1, %xmm0, %xmm6 -; GFNIAVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm2 +; GFNIAVX1-NEXT: vpsllw $2, %xmm2, %xmm2 ; GFNIAVX1-NEXT: vpblendvb %xmm2, %xmm6, %xmm0, %xmm0 ; GFNIAVX1-NEXT: vpsrlw $8, %xmm0, %xmm0 ; GFNIAVX1-NEXT: vpackuswb %xmm5, %xmm0, %xmm0 @@ -2078,10 +2078,10 @@ define <64 x i8> @var_ashr_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind { ; GFNIAVX1-NEXT: vpsraw $4, %xmm6, %xmm7 ; GFNIAVX1-NEXT: vpblendvb %xmm4, %xmm7, %xmm6, %xmm6 ; GFNIAVX1-NEXT: vpsraw $2, %xmm6, %xmm7 -; GFNIAVX1-NEXT: vpaddw %xmm4, %xmm4, %xmm4 -; GFNIAVX1-NEXT: vpblendvb %xmm4, %xmm7, %xmm6, %xmm6 +; GFNIAVX1-NEXT: vpaddw %xmm4, %xmm4, %xmm8 +; GFNIAVX1-NEXT: vpblendvb %xmm8, %xmm7, %xmm6, %xmm6 ; GFNIAVX1-NEXT: vpsraw $1, %xmm6, %xmm7 -; GFNIAVX1-NEXT: vpaddw %xmm4, %xmm4, %xmm4 +; GFNIAVX1-NEXT: vpsllw $2, %xmm4, %xmm4 ; GFNIAVX1-NEXT: vpblendvb %xmm4, %xmm7, %xmm6, %xmm4 ; GFNIAVX1-NEXT: vpsrlw $8, %xmm4, %xmm4 ; GFNIAVX1-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] @@ -2089,10 +2089,10 @@ define <64 x i8> @var_ashr_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind { ; GFNIAVX1-NEXT: vpsraw $4, %xmm5, %xmm6 ; GFNIAVX1-NEXT: vpblendvb %xmm2, %xmm6, %xmm5, %xmm5 ; GFNIAVX1-NEXT: vpsraw $2, %xmm5, %xmm6 -; GFNIAVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm2 -; GFNIAVX1-NEXT: vpblendvb %xmm2, %xmm6, %xmm5, %xmm5 +; GFNIAVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm7 +; GFNIAVX1-NEXT: vpblendvb %xmm7, %xmm6, %xmm5, %xmm5 ; GFNIAVX1-NEXT: vpsraw $1, %xmm5, %xmm6 -; GFNIAVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm2 +; GFNIAVX1-NEXT: vpsllw $2, %xmm2, %xmm2 ; GFNIAVX1-NEXT: vpblendvb %xmm2, %xmm6, %xmm5, %xmm2 ; GFNIAVX1-NEXT: vpsrlw $8, %xmm2, %xmm2 ; GFNIAVX1-NEXT: vpackuswb %xmm4, %xmm2, %xmm2 @@ -2102,10 +2102,10 @@ define <64 x i8> @var_ashr_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind { ; GFNIAVX1-NEXT: vpsraw $4, %xmm5, %xmm6 ; GFNIAVX1-NEXT: vpblendvb %xmm4, %xmm6, %xmm5, %xmm5 ; GFNIAVX1-NEXT: vpsraw $2, %xmm5, %xmm6 -; GFNIAVX1-NEXT: 
vpaddw %xmm4, %xmm4, %xmm4 -; GFNIAVX1-NEXT: vpblendvb %xmm4, %xmm6, %xmm5, %xmm5 +; GFNIAVX1-NEXT: vpaddw %xmm4, %xmm4, %xmm7 +; GFNIAVX1-NEXT: vpblendvb %xmm7, %xmm6, %xmm5, %xmm5 ; GFNIAVX1-NEXT: vpsraw $1, %xmm5, %xmm6 -; GFNIAVX1-NEXT: vpaddw %xmm4, %xmm4, %xmm4 +; GFNIAVX1-NEXT: vpsllw $2, %xmm4, %xmm4 ; GFNIAVX1-NEXT: vpblendvb %xmm4, %xmm6, %xmm5, %xmm4 ; GFNIAVX1-NEXT: vpsrlw $8, %xmm4, %xmm4 ; GFNIAVX1-NEXT: vpunpcklbw {{.*#+}} xmm3 = xmm3[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] @@ -2113,10 +2113,10 @@ define <64 x i8> @var_ashr_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind { ; GFNIAVX1-NEXT: vpsraw $4, %xmm1, %xmm5 ; GFNIAVX1-NEXT: vpblendvb %xmm3, %xmm5, %xmm1, %xmm1 ; GFNIAVX1-NEXT: vpsraw $2, %xmm1, %xmm5 -; GFNIAVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3 -; GFNIAVX1-NEXT: vpblendvb %xmm3, %xmm5, %xmm1, %xmm1 +; GFNIAVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm6 +; GFNIAVX1-NEXT: vpblendvb %xmm6, %xmm5, %xmm1, %xmm1 ; GFNIAVX1-NEXT: vpsraw $1, %xmm1, %xmm5 -; GFNIAVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3 +; GFNIAVX1-NEXT: vpsllw $2, %xmm3, %xmm3 ; GFNIAVX1-NEXT: vpblendvb %xmm3, %xmm5, %xmm1, %xmm1 ; GFNIAVX1-NEXT: vpsrlw $8, %xmm1, %xmm1 ; GFNIAVX1-NEXT: vpackuswb %xmm4, %xmm1, %xmm1 @@ -2131,10 +2131,10 @@ define <64 x i8> @var_ashr_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind { ; GFNIAVX2-NEXT: vpsraw $4, %ymm5, %ymm6 ; GFNIAVX2-NEXT: vpblendvb %ymm4, %ymm6, %ymm5, %ymm5 ; GFNIAVX2-NEXT: vpsraw $2, %ymm5, %ymm6 -; GFNIAVX2-NEXT: vpaddw %ymm4, %ymm4, %ymm4 -; GFNIAVX2-NEXT: vpblendvb %ymm4, %ymm6, %ymm5, %ymm5 +; GFNIAVX2-NEXT: vpaddw %ymm4, %ymm4, %ymm7 +; GFNIAVX2-NEXT: vpblendvb %ymm7, %ymm6, %ymm5, %ymm5 ; GFNIAVX2-NEXT: vpsraw $1, %ymm5, %ymm6 -; GFNIAVX2-NEXT: vpaddw %ymm4, %ymm4, %ymm4 +; GFNIAVX2-NEXT: vpsllw $2, %ymm4, %ymm4 ; GFNIAVX2-NEXT: vpblendvb %ymm4, %ymm6, %ymm5, %ymm4 ; GFNIAVX2-NEXT: vpsrlw $8, %ymm4, %ymm4 ; GFNIAVX2-NEXT: vpunpcklbw {{.*#+}} ymm2 = ymm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23] @@ -2142,10 +2142,10 @@ define <64 x i8> @var_ashr_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind { ; GFNIAVX2-NEXT: vpsraw $4, %ymm0, %ymm5 ; GFNIAVX2-NEXT: vpblendvb %ymm2, %ymm5, %ymm0, %ymm0 ; GFNIAVX2-NEXT: vpsraw $2, %ymm0, %ymm5 -; GFNIAVX2-NEXT: vpaddw %ymm2, %ymm2, %ymm2 -; GFNIAVX2-NEXT: vpblendvb %ymm2, %ymm5, %ymm0, %ymm0 +; GFNIAVX2-NEXT: vpaddw %ymm2, %ymm2, %ymm6 +; GFNIAVX2-NEXT: vpblendvb %ymm6, %ymm5, %ymm0, %ymm0 ; GFNIAVX2-NEXT: vpsraw $1, %ymm0, %ymm5 -; GFNIAVX2-NEXT: vpaddw %ymm2, %ymm2, %ymm2 +; GFNIAVX2-NEXT: vpsllw $2, %ymm2, %ymm2 ; GFNIAVX2-NEXT: vpblendvb %ymm2, %ymm5, %ymm0, %ymm0 ; GFNIAVX2-NEXT: vpsrlw $8, %ymm0, %ymm0 ; GFNIAVX2-NEXT: vpackuswb %ymm4, %ymm0, %ymm0 @@ -2155,10 +2155,10 @@ define <64 x i8> @var_ashr_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind { ; GFNIAVX2-NEXT: vpsraw $4, %ymm4, %ymm5 ; GFNIAVX2-NEXT: vpblendvb %ymm3, %ymm5, %ymm4, %ymm4 ; GFNIAVX2-NEXT: vpsraw $2, %ymm4, %ymm5 -; GFNIAVX2-NEXT: vpaddw %ymm3, %ymm3, %ymm3 -; GFNIAVX2-NEXT: vpblendvb %ymm3, %ymm5, %ymm4, %ymm4 +; GFNIAVX2-NEXT: vpaddw %ymm3, %ymm3, %ymm6 +; GFNIAVX2-NEXT: vpblendvb %ymm6, %ymm5, %ymm4, %ymm4 ; GFNIAVX2-NEXT: vpsraw $1, %ymm4, %ymm5 -; GFNIAVX2-NEXT: vpaddw %ymm3, %ymm3, %ymm3 +; GFNIAVX2-NEXT: vpsllw $2, %ymm3, %ymm3 ; GFNIAVX2-NEXT: vpblendvb %ymm3, %ymm5, %ymm4, %ymm3 ; GFNIAVX2-NEXT: vpsrlw $8, %ymm3, %ymm3 ; GFNIAVX2-NEXT: vpunpcklbw {{.*#+}} ymm2 = ymm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23] @@ -2166,10 +2166,10 @@ define <64 x i8> @var_ashr_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind 
{ ; GFNIAVX2-NEXT: vpsraw $4, %ymm1, %ymm4 ; GFNIAVX2-NEXT: vpblendvb %ymm2, %ymm4, %ymm1, %ymm1 ; GFNIAVX2-NEXT: vpsraw $2, %ymm1, %ymm4 -; GFNIAVX2-NEXT: vpaddw %ymm2, %ymm2, %ymm2 -; GFNIAVX2-NEXT: vpblendvb %ymm2, %ymm4, %ymm1, %ymm1 +; GFNIAVX2-NEXT: vpaddw %ymm2, %ymm2, %ymm5 +; GFNIAVX2-NEXT: vpblendvb %ymm5, %ymm4, %ymm1, %ymm1 ; GFNIAVX2-NEXT: vpsraw $1, %ymm1, %ymm4 -; GFNIAVX2-NEXT: vpaddw %ymm2, %ymm2, %ymm2 +; GFNIAVX2-NEXT: vpsllw $2, %ymm2, %ymm2 ; GFNIAVX2-NEXT: vpblendvb %ymm2, %ymm4, %ymm1, %ymm1 ; GFNIAVX2-NEXT: vpsrlw $8, %ymm1, %ymm1 ; GFNIAVX2-NEXT: vpackuswb %ymm3, %ymm1, %ymm1 @@ -2185,10 +2185,10 @@ define <64 x i8> @var_ashr_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind { ; GFNIAVX512VL-NEXT: vpsraw $4, %ymm5, %ymm6 ; GFNIAVX512VL-NEXT: vpblendvb %ymm3, %ymm6, %ymm5, %ymm5 ; GFNIAVX512VL-NEXT: vpsraw $2, %ymm5, %ymm6 -; GFNIAVX512VL-NEXT: vpaddw %ymm3, %ymm3, %ymm3 -; GFNIAVX512VL-NEXT: vpblendvb %ymm3, %ymm6, %ymm5, %ymm5 +; GFNIAVX512VL-NEXT: vpaddw %ymm3, %ymm3, %ymm7 +; GFNIAVX512VL-NEXT: vpblendvb %ymm7, %ymm6, %ymm5, %ymm5 ; GFNIAVX512VL-NEXT: vpsraw $1, %ymm5, %ymm6 -; GFNIAVX512VL-NEXT: vpaddw %ymm3, %ymm3, %ymm3 +; GFNIAVX512VL-NEXT: vpsllw $2, %ymm3, %ymm3 ; GFNIAVX512VL-NEXT: vpblendvb %ymm3, %ymm6, %ymm5, %ymm3 ; GFNIAVX512VL-NEXT: vpsrlw $8, %ymm3, %ymm3 ; GFNIAVX512VL-NEXT: vpunpcklbw {{.*#+}} ymm2 = ymm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23] @@ -2196,10 +2196,10 @@ define <64 x i8> @var_ashr_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind { ; GFNIAVX512VL-NEXT: vpsraw $4, %ymm4, %ymm5 ; GFNIAVX512VL-NEXT: vpblendvb %ymm2, %ymm5, %ymm4, %ymm4 ; GFNIAVX512VL-NEXT: vpsraw $2, %ymm4, %ymm5 -; GFNIAVX512VL-NEXT: vpaddw %ymm2, %ymm2, %ymm2 -; GFNIAVX512VL-NEXT: vpblendvb %ymm2, %ymm5, %ymm4, %ymm4 +; GFNIAVX512VL-NEXT: vpaddw %ymm2, %ymm2, %ymm6 +; GFNIAVX512VL-NEXT: vpblendvb %ymm6, %ymm5, %ymm4, %ymm4 ; GFNIAVX512VL-NEXT: vpsraw $1, %ymm4, %ymm5 -; GFNIAVX512VL-NEXT: vpaddw %ymm2, %ymm2, %ymm2 +; GFNIAVX512VL-NEXT: vpsllw $2, %ymm2, %ymm2 ; GFNIAVX512VL-NEXT: vpblendvb %ymm2, %ymm5, %ymm4, %ymm2 ; GFNIAVX512VL-NEXT: vpsrlw $8, %ymm2, %ymm2 ; GFNIAVX512VL-NEXT: vpackuswb %ymm3, %ymm2, %ymm2 @@ -2209,10 +2209,10 @@ define <64 x i8> @var_ashr_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind { ; GFNIAVX512VL-NEXT: vpsraw $4, %ymm4, %ymm5 ; GFNIAVX512VL-NEXT: vpblendvb %ymm3, %ymm5, %ymm4, %ymm4 ; GFNIAVX512VL-NEXT: vpsraw $2, %ymm4, %ymm5 -; GFNIAVX512VL-NEXT: vpaddw %ymm3, %ymm3, %ymm3 -; GFNIAVX512VL-NEXT: vpblendvb %ymm3, %ymm5, %ymm4, %ymm4 +; GFNIAVX512VL-NEXT: vpaddw %ymm3, %ymm3, %ymm6 +; GFNIAVX512VL-NEXT: vpblendvb %ymm6, %ymm5, %ymm4, %ymm4 ; GFNIAVX512VL-NEXT: vpsraw $1, %ymm4, %ymm5 -; GFNIAVX512VL-NEXT: vpaddw %ymm3, %ymm3, %ymm3 +; GFNIAVX512VL-NEXT: vpsllw $2, %ymm3, %ymm3 ; GFNIAVX512VL-NEXT: vpblendvb %ymm3, %ymm5, %ymm4, %ymm3 ; GFNIAVX512VL-NEXT: vpsrlw $8, %ymm3, %ymm3 ; GFNIAVX512VL-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23] @@ -2220,10 +2220,10 @@ define <64 x i8> @var_ashr_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind { ; GFNIAVX512VL-NEXT: vpsraw $4, %ymm0, %ymm4 ; GFNIAVX512VL-NEXT: vpblendvb %ymm1, %ymm4, %ymm0, %ymm0 ; GFNIAVX512VL-NEXT: vpsraw $2, %ymm0, %ymm4 -; GFNIAVX512VL-NEXT: vpaddw %ymm1, %ymm1, %ymm1 -; GFNIAVX512VL-NEXT: vpblendvb %ymm1, %ymm4, %ymm0, %ymm0 +; GFNIAVX512VL-NEXT: vpaddw %ymm1, %ymm1, %ymm5 +; GFNIAVX512VL-NEXT: vpblendvb %ymm5, %ymm4, %ymm0, %ymm0 ; GFNIAVX512VL-NEXT: vpsraw $1, %ymm0, %ymm4 -; 
GFNIAVX512VL-NEXT: vpaddw %ymm1, %ymm1, %ymm1 +; GFNIAVX512VL-NEXT: vpsllw $2, %ymm1, %ymm1 ; GFNIAVX512VL-NEXT: vpblendvb %ymm1, %ymm4, %ymm0, %ymm0 ; GFNIAVX512VL-NEXT: vpsrlw $8, %ymm0, %ymm0 ; GFNIAVX512VL-NEXT: vpackuswb %ymm3, %ymm0, %ymm0 @@ -2239,11 +2239,11 @@ define <64 x i8> @var_ashr_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind { ; GFNIAVX512BW-NEXT: vpmovb2m %zmm4, %k1 ; GFNIAVX512BW-NEXT: vmovdqu8 %zmm3, %zmm2 {%k1} ; GFNIAVX512BW-NEXT: vpsraw $2, %zmm2, %zmm3 -; GFNIAVX512BW-NEXT: vpaddw %zmm4, %zmm4, %zmm4 -; GFNIAVX512BW-NEXT: vpmovb2m %zmm4, %k1 +; GFNIAVX512BW-NEXT: vpaddw %zmm4, %zmm4, %zmm5 +; GFNIAVX512BW-NEXT: vpmovb2m %zmm5, %k1 ; GFNIAVX512BW-NEXT: vmovdqu8 %zmm3, %zmm2 {%k1} ; GFNIAVX512BW-NEXT: vpsraw $1, %zmm2, %zmm3 -; GFNIAVX512BW-NEXT: vpaddw %zmm4, %zmm4, %zmm4 +; GFNIAVX512BW-NEXT: vpsllw $2, %zmm4, %zmm4 ; GFNIAVX512BW-NEXT: vpmovb2m %zmm4, %k1 ; GFNIAVX512BW-NEXT: vmovdqu8 %zmm3, %zmm2 {%k1} ; GFNIAVX512BW-NEXT: vpsrlw $8, %zmm2, %zmm2 @@ -2253,11 +2253,11 @@ define <64 x i8> @var_ashr_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind { ; GFNIAVX512BW-NEXT: vpmovb2m %zmm1, %k1 ; GFNIAVX512BW-NEXT: vmovdqu8 %zmm3, %zmm0 {%k1} ; GFNIAVX512BW-NEXT: vpsraw $2, %zmm0, %zmm3 -; GFNIAVX512BW-NEXT: vpaddw %zmm1, %zmm1, %zmm1 -; GFNIAVX512BW-NEXT: vpmovb2m %zmm1, %k1 +; GFNIAVX512BW-NEXT: vpaddw %zmm1, %zmm1, %zmm4 +; GFNIAVX512BW-NEXT: vpmovb2m %zmm4, %k1 ; GFNIAVX512BW-NEXT: vmovdqu8 %zmm3, %zmm0 {%k1} ; GFNIAVX512BW-NEXT: vpsraw $1, %zmm0, %zmm3 -; GFNIAVX512BW-NEXT: vpaddw %zmm1, %zmm1, %zmm1 +; GFNIAVX512BW-NEXT: vpsllw $2, %zmm1, %zmm1 ; GFNIAVX512BW-NEXT: vpmovb2m %zmm1, %k1 ; GFNIAVX512BW-NEXT: vmovdqu8 %zmm3, %zmm0 {%k1} ; GFNIAVX512BW-NEXT: vpsrlw $8, %zmm0, %zmm0 diff --git a/llvm/test/CodeGen/X86/isel-fpclass.ll b/llvm/test/CodeGen/X86/isel-fpclass.ll index 960bbf5..df04b67 100644 --- a/llvm/test/CodeGen/X86/isel-fpclass.ll +++ b/llvm/test/CodeGen/X86/isel-fpclass.ll @@ -1,16 +1,16 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 -; RUN: llc < %s -mtriple=i686-linux | FileCheck %s -check-prefixes=X86-SDAGISEL +; RUN: llc < %s -mtriple=i686-linux | FileCheck %s -check-prefixes=X86 ; RUN: llc < %s -mtriple=x86_64-linux | FileCheck %s -check-prefixes=X64,X64-SDAGISEL ; RUN: llc < %s -mtriple=i686-linux -fast-isel -fast-isel-abort=1 | FileCheck %s -check-prefixes=X86-FASTISEL ; RUN: llc < %s -mtriple=x86_64-linux -fast-isel -fast-isel-abort=1 | FileCheck %s -check-prefixes=X64,X64-FASTISEL +; RUN: llc < %s -mtriple=i686-linux -global-isel -global-isel-abort=2 | FileCheck %s -check-prefixes=X86 +; RUN: llc < %s -mtriple=x86_64-linux -global-isel -global-isel-abort=2 | FileCheck %s -check-prefixes=X64,X64-GISEL -; FIXME: We can reuse/delete llvm/test/CodeGen/X86/is_fpclass.ll when all patches are included. 
- -define i1 @isnone_f(float %x) { -; X86-SDAGISEL-LABEL: isnone_f: -; X86-SDAGISEL: # %bb.0: # %entry -; X86-SDAGISEL-NEXT: xorl %eax, %eax -; X86-SDAGISEL-NEXT: retl +define i1 @isnone_f(float %x) nounwind { +; X86-LABEL: isnone_f: +; X86: # %bb.0: # %entry +; X86-NEXT: xorl %eax, %eax +; X86-NEXT: retl ; ; X64-LABEL: isnone_f: ; X64: # %bb.0: # %entry @@ -28,11 +28,11 @@ entry: ret i1 %0 } -define i1 @isany_f(float %x) { -; X86-SDAGISEL-LABEL: isany_f: -; X86-SDAGISEL: # %bb.0: # %entry -; X86-SDAGISEL-NEXT: movb $1, %al -; X86-SDAGISEL-NEXT: retl +define i1 @isany_f(float %x) nounwind { +; X86-LABEL: isany_f: +; X86: # %bb.0: # %entry +; X86-NEXT: movb $1, %al +; X86-NEXT: retl ; ; X64-LABEL: isany_f: ; X64: # %bb.0: # %entry @@ -50,17 +50,17 @@ entry: ret i1 %0 } -define i1 @issignaling_f(float %x) { -; X86-SDAGISEL-LABEL: issignaling_f: -; X86-SDAGISEL: # %bb.0: -; X86-SDAGISEL-NEXT: movl $2147483647, %eax # imm = 0x7FFFFFFF -; X86-SDAGISEL-NEXT: andl {{[0-9]+}}(%esp), %eax -; X86-SDAGISEL-NEXT: cmpl $2143289344, %eax # imm = 0x7FC00000 -; X86-SDAGISEL-NEXT: setl %cl -; X86-SDAGISEL-NEXT: cmpl $2139095041, %eax # imm = 0x7F800001 -; X86-SDAGISEL-NEXT: setge %al -; X86-SDAGISEL-NEXT: andb %cl, %al -; X86-SDAGISEL-NEXT: retl +define i1 @issignaling_f(float %x) nounwind { +; X86-LABEL: issignaling_f: +; X86: # %bb.0: +; X86-NEXT: movl $2147483647, %eax # imm = 0x7FFFFFFF +; X86-NEXT: andl {{[0-9]+}}(%esp), %eax +; X86-NEXT: cmpl $2143289344, %eax # imm = 0x7FC00000 +; X86-NEXT: setl %cl +; X86-NEXT: cmpl $2139095041, %eax # imm = 0x7F800001 +; X86-NEXT: setge %al +; X86-NEXT: andb %cl, %al +; X86-NEXT: retl ; ; X64-LABEL: issignaling_f: ; X64: # %bb.0: @@ -76,7 +76,6 @@ define i1 @issignaling_f(float %x) { ; X86-FASTISEL-LABEL: issignaling_f: ; X86-FASTISEL: # %bb.0: ; X86-FASTISEL-NEXT: pushl %eax -; X86-FASTISEL-NEXT: .cfi_def_cfa_offset 8 ; X86-FASTISEL-NEXT: flds {{[0-9]+}}(%esp) ; X86-FASTISEL-NEXT: fstps (%esp) ; X86-FASTISEL-NEXT: movl $2147483647, %eax # imm = 0x7FFFFFFF @@ -87,20 +86,19 @@ define i1 @issignaling_f(float %x) { ; X86-FASTISEL-NEXT: setge %al ; X86-FASTISEL-NEXT: andb %cl, %al ; X86-FASTISEL-NEXT: popl %ecx -; X86-FASTISEL-NEXT: .cfi_def_cfa_offset 4 ; X86-FASTISEL-NEXT: retl %a0 = tail call i1 @llvm.is.fpclass.f32(float %x, i32 1) ; "snan" ret i1 %a0 } - define i1 @isquiet_f(float %x) { -; X86-SDAGISEL-LABEL: isquiet_f: -; X86-SDAGISEL: # %bb.0: # %entry -; X86-SDAGISEL-NEXT: movl $2147483647, %eax # imm = 0x7FFFFFFF -; X86-SDAGISEL-NEXT: andl {{[0-9]+}}(%esp), %eax -; X86-SDAGISEL-NEXT: cmpl $2143289344, %eax # imm = 0x7FC00000 -; X86-SDAGISEL-NEXT: setge %al -; X86-SDAGISEL-NEXT: retl + define i1 @isquiet_f(float %x) nounwind { +; X86-LABEL: isquiet_f: +; X86: # %bb.0: # %entry +; X86-NEXT: movl $2147483647, %eax # imm = 0x7FFFFFFF +; X86-NEXT: andl {{[0-9]+}}(%esp), %eax +; X86-NEXT: cmpl $2143289344, %eax # imm = 0x7FC00000 +; X86-NEXT: setge %al +; X86-NEXT: retl ; ; X64-LABEL: isquiet_f: ; X64: # %bb.0: # %entry @@ -113,7 +111,6 @@ define i1 @issignaling_f(float %x) { ; X86-FASTISEL-LABEL: isquiet_f: ; X86-FASTISEL: # %bb.0: # %entry ; X86-FASTISEL-NEXT: pushl %eax -; X86-FASTISEL-NEXT: .cfi_def_cfa_offset 8 ; X86-FASTISEL-NEXT: flds {{[0-9]+}}(%esp) ; X86-FASTISEL-NEXT: fstps (%esp) ; X86-FASTISEL-NEXT: movl $2147483647, %eax # imm = 0x7FFFFFFF @@ -121,21 +118,20 @@ define i1 @issignaling_f(float %x) { ; X86-FASTISEL-NEXT: cmpl $2143289344, %eax # imm = 0x7FC00000 ; X86-FASTISEL-NEXT: setge %al ; X86-FASTISEL-NEXT: popl %ecx -; X86-FASTISEL-NEXT: 
.cfi_def_cfa_offset 4 ; X86-FASTISEL-NEXT: retl entry: %0 = tail call i1 @llvm.is.fpclass.f32(float %x, i32 2) ; "qnan" ret i1 %0 } -define i1 @not_isquiet_f(float %x) { -; X86-SDAGISEL-LABEL: not_isquiet_f: -; X86-SDAGISEL: # %bb.0: # %entry -; X86-SDAGISEL-NEXT: movl $2147483647, %eax # imm = 0x7FFFFFFF -; X86-SDAGISEL-NEXT: andl {{[0-9]+}}(%esp), %eax -; X86-SDAGISEL-NEXT: cmpl $2143289344, %eax # imm = 0x7FC00000 -; X86-SDAGISEL-NEXT: setl %al -; X86-SDAGISEL-NEXT: retl +define i1 @not_isquiet_f(float %x) nounwind { +; X86-LABEL: not_isquiet_f: +; X86: # %bb.0: # %entry +; X86-NEXT: movl $2147483647, %eax # imm = 0x7FFFFFFF +; X86-NEXT: andl {{[0-9]+}}(%esp), %eax +; X86-NEXT: cmpl $2143289344, %eax # imm = 0x7FC00000 +; X86-NEXT: setl %al +; X86-NEXT: retl ; ; X64-LABEL: not_isquiet_f: ; X64: # %bb.0: # %entry @@ -148,7 +144,6 @@ define i1 @not_isquiet_f(float %x) { ; X86-FASTISEL-LABEL: not_isquiet_f: ; X86-FASTISEL: # %bb.0: # %entry ; X86-FASTISEL-NEXT: pushl %eax -; X86-FASTISEL-NEXT: .cfi_def_cfa_offset 8 ; X86-FASTISEL-NEXT: flds {{[0-9]+}}(%esp) ; X86-FASTISEL-NEXT: fstps (%esp) ; X86-FASTISEL-NEXT: movl $2147483647, %eax # imm = 0x7FFFFFFF @@ -156,21 +151,20 @@ define i1 @not_isquiet_f(float %x) { ; X86-FASTISEL-NEXT: cmpl $2143289344, %eax # imm = 0x7FC00000 ; X86-FASTISEL-NEXT: setl %al ; X86-FASTISEL-NEXT: popl %ecx -; X86-FASTISEL-NEXT: .cfi_def_cfa_offset 4 ; X86-FASTISEL-NEXT: retl entry: %0 = tail call i1 @llvm.is.fpclass.f32(float %x, i32 1021) ; ~"qnan" ret i1 %0 } -define i1 @isinf_f(float %x) { -; X86-SDAGISEL-LABEL: isinf_f: -; X86-SDAGISEL: # %bb.0: # %entry -; X86-SDAGISEL-NEXT: movl $2147483647, %eax # imm = 0x7FFFFFFF -; X86-SDAGISEL-NEXT: andl {{[0-9]+}}(%esp), %eax -; X86-SDAGISEL-NEXT: cmpl $2139095040, %eax # imm = 0x7F800000 -; X86-SDAGISEL-NEXT: sete %al -; X86-SDAGISEL-NEXT: retl +define i1 @isinf_f(float %x) nounwind { +; X86-LABEL: isinf_f: +; X86: # %bb.0: # %entry +; X86-NEXT: movl $2147483647, %eax # imm = 0x7FFFFFFF +; X86-NEXT: andl {{[0-9]+}}(%esp), %eax +; X86-NEXT: cmpl $2139095040, %eax # imm = 0x7F800000 +; X86-NEXT: sete %al +; X86-NEXT: retl ; ; X64-LABEL: isinf_f: ; X64: # %bb.0: # %entry @@ -183,7 +177,6 @@ define i1 @isinf_f(float %x) { ; X86-FASTISEL-LABEL: isinf_f: ; X86-FASTISEL: # %bb.0: # %entry ; X86-FASTISEL-NEXT: pushl %eax -; X86-FASTISEL-NEXT: .cfi_def_cfa_offset 8 ; X86-FASTISEL-NEXT: flds {{[0-9]+}}(%esp) ; X86-FASTISEL-NEXT: fstps (%esp) ; X86-FASTISEL-NEXT: movl $2147483647, %eax # imm = 0x7FFFFFFF @@ -191,21 +184,20 @@ define i1 @isinf_f(float %x) { ; X86-FASTISEL-NEXT: cmpl $2139095040, %eax # imm = 0x7F800000 ; X86-FASTISEL-NEXT: sete %al ; X86-FASTISEL-NEXT: popl %ecx -; X86-FASTISEL-NEXT: .cfi_def_cfa_offset 4 ; X86-FASTISEL-NEXT: retl entry: %0 = tail call i1 @llvm.is.fpclass.f32(float %x, i32 516) ; 0x204 = "inf" ret i1 %0 } -define i1 @not_isinf_f(float %x) { -; X86-SDAGISEL-LABEL: not_isinf_f: -; X86-SDAGISEL: # %bb.0: # %entry -; X86-SDAGISEL-NEXT: movl $2147483647, %eax # imm = 0x7FFFFFFF -; X86-SDAGISEL-NEXT: andl {{[0-9]+}}(%esp), %eax -; X86-SDAGISEL-NEXT: cmpl $2139095040, %eax # imm = 0x7F800000 -; X86-SDAGISEL-NEXT: setne %al -; X86-SDAGISEL-NEXT: retl +define i1 @not_isinf_f(float %x) nounwind { +; X86-LABEL: not_isinf_f: +; X86: # %bb.0: # %entry +; X86-NEXT: movl $2147483647, %eax # imm = 0x7FFFFFFF +; X86-NEXT: andl {{[0-9]+}}(%esp), %eax +; X86-NEXT: cmpl $2139095040, %eax # imm = 0x7F800000 +; X86-NEXT: setne %al +; X86-NEXT: retl ; ; X64-LABEL: not_isinf_f: ; X64: # %bb.0: # %entry @@ -218,7 +210,6 
@@ define i1 @not_isinf_f(float %x) { ; X86-FASTISEL-LABEL: not_isinf_f: ; X86-FASTISEL: # %bb.0: # %entry ; X86-FASTISEL-NEXT: pushl %eax -; X86-FASTISEL-NEXT: .cfi_def_cfa_offset 8 ; X86-FASTISEL-NEXT: flds {{[0-9]+}}(%esp) ; X86-FASTISEL-NEXT: fstps (%esp) ; X86-FASTISEL-NEXT: movl $2147483647, %eax # imm = 0x7FFFFFFF @@ -226,19 +217,18 @@ define i1 @not_isinf_f(float %x) { ; X86-FASTISEL-NEXT: cmpl $2139095040, %eax # imm = 0x7F800000 ; X86-FASTISEL-NEXT: setne %al ; X86-FASTISEL-NEXT: popl %ecx -; X86-FASTISEL-NEXT: .cfi_def_cfa_offset 4 ; X86-FASTISEL-NEXT: retl entry: %0 = tail call i1 @llvm.is.fpclass.f32(float %x, i32 507) ; ~0x204 = "~inf" ret i1 %0 } -define i1 @is_plus_inf_f(float %x) { -; X86-SDAGISEL-LABEL: is_plus_inf_f: -; X86-SDAGISEL: # %bb.0: # %entry -; X86-SDAGISEL-NEXT: cmpl $2139095040, {{[0-9]+}}(%esp) # imm = 0x7F800000 -; X86-SDAGISEL-NEXT: sete %al -; X86-SDAGISEL-NEXT: retl +define i1 @is_plus_inf_f(float %x) nounwind { +; X86-LABEL: is_plus_inf_f: +; X86: # %bb.0: # %entry +; X86-NEXT: cmpl $2139095040, {{[0-9]+}}(%esp) # imm = 0x7F800000 +; X86-NEXT: sete %al +; X86-NEXT: retl ; ; X64-LABEL: is_plus_inf_f: ; X64: # %bb.0: # %entry @@ -250,25 +240,23 @@ define i1 @is_plus_inf_f(float %x) { ; X86-FASTISEL-LABEL: is_plus_inf_f: ; X86-FASTISEL: # %bb.0: # %entry ; X86-FASTISEL-NEXT: pushl %eax -; X86-FASTISEL-NEXT: .cfi_def_cfa_offset 8 ; X86-FASTISEL-NEXT: flds {{[0-9]+}}(%esp) ; X86-FASTISEL-NEXT: fstps (%esp) ; X86-FASTISEL-NEXT: cmpl $2139095040, (%esp) # imm = 0x7F800000 ; X86-FASTISEL-NEXT: sete %al ; X86-FASTISEL-NEXT: popl %ecx -; X86-FASTISEL-NEXT: .cfi_def_cfa_offset 4 ; X86-FASTISEL-NEXT: retl entry: %0 = tail call i1 @llvm.is.fpclass.f32(float %x, i32 512) ; 0x200 = "+inf" ret i1 %0 } -define i1 @is_minus_inf_f(float %x) { -; X86-SDAGISEL-LABEL: is_minus_inf_f: -; X86-SDAGISEL: # %bb.0: # %entry -; X86-SDAGISEL-NEXT: cmpl $-8388608, {{[0-9]+}}(%esp) # imm = 0xFF800000 -; X86-SDAGISEL-NEXT: sete %al -; X86-SDAGISEL-NEXT: retl +define i1 @is_minus_inf_f(float %x) nounwind { +; X86-LABEL: is_minus_inf_f: +; X86: # %bb.0: # %entry +; X86-NEXT: cmpl $-8388608, {{[0-9]+}}(%esp) # imm = 0xFF800000 +; X86-NEXT: sete %al +; X86-NEXT: retl ; ; X64-LABEL: is_minus_inf_f: ; X64: # %bb.0: # %entry @@ -280,25 +268,23 @@ define i1 @is_minus_inf_f(float %x) { ; X86-FASTISEL-LABEL: is_minus_inf_f: ; X86-FASTISEL: # %bb.0: # %entry ; X86-FASTISEL-NEXT: pushl %eax -; X86-FASTISEL-NEXT: .cfi_def_cfa_offset 8 ; X86-FASTISEL-NEXT: flds {{[0-9]+}}(%esp) ; X86-FASTISEL-NEXT: fstps (%esp) ; X86-FASTISEL-NEXT: cmpl $-8388608, (%esp) # imm = 0xFF800000 ; X86-FASTISEL-NEXT: sete %al ; X86-FASTISEL-NEXT: popl %ecx -; X86-FASTISEL-NEXT: .cfi_def_cfa_offset 4 ; X86-FASTISEL-NEXT: retl entry: %0 = tail call i1 @llvm.is.fpclass.f32(float %x, i32 4) ; "-inf" ret i1 %0 } -define i1 @not_is_minus_inf_f(float %x) { -; X86-SDAGISEL-LABEL: not_is_minus_inf_f: -; X86-SDAGISEL: # %bb.0: # %entry -; X86-SDAGISEL-NEXT: cmpl $-8388608, {{[0-9]+}}(%esp) # imm = 0xFF800000 -; X86-SDAGISEL-NEXT: setne %al -; X86-SDAGISEL-NEXT: retl +define i1 @not_is_minus_inf_f(float %x) nounwind { +; X86-LABEL: not_is_minus_inf_f: +; X86: # %bb.0: # %entry +; X86-NEXT: cmpl $-8388608, {{[0-9]+}}(%esp) # imm = 0xFF800000 +; X86-NEXT: setne %al +; X86-NEXT: retl ; ; X64-LABEL: not_is_minus_inf_f: ; X64: # %bb.0: # %entry @@ -310,27 +296,25 @@ define i1 @not_is_minus_inf_f(float %x) { ; X86-FASTISEL-LABEL: not_is_minus_inf_f: ; X86-FASTISEL: # %bb.0: # %entry ; X86-FASTISEL-NEXT: pushl %eax -; X86-FASTISEL-NEXT: 
.cfi_def_cfa_offset 8 ; X86-FASTISEL-NEXT: flds {{[0-9]+}}(%esp) ; X86-FASTISEL-NEXT: fstps (%esp) ; X86-FASTISEL-NEXT: cmpl $-8388608, (%esp) # imm = 0xFF800000 ; X86-FASTISEL-NEXT: setne %al ; X86-FASTISEL-NEXT: popl %ecx -; X86-FASTISEL-NEXT: .cfi_def_cfa_offset 4 ; X86-FASTISEL-NEXT: retl entry: %0 = tail call i1 @llvm.is.fpclass.f32(float %x, i32 1019) ; ~"-inf" ret i1 %0 } -define i1 @isfinite_f(float %x) { -; X86-SDAGISEL-LABEL: isfinite_f: -; X86-SDAGISEL: # %bb.0: # %entry -; X86-SDAGISEL-NEXT: movl $2147483647, %eax # imm = 0x7FFFFFFF -; X86-SDAGISEL-NEXT: andl {{[0-9]+}}(%esp), %eax -; X86-SDAGISEL-NEXT: cmpl $2139095040, %eax # imm = 0x7F800000 -; X86-SDAGISEL-NEXT: setl %al -; X86-SDAGISEL-NEXT: retl +define i1 @isfinite_f(float %x) nounwind { +; X86-LABEL: isfinite_f: +; X86: # %bb.0: # %entry +; X86-NEXT: movl $2147483647, %eax # imm = 0x7FFFFFFF +; X86-NEXT: andl {{[0-9]+}}(%esp), %eax +; X86-NEXT: cmpl $2139095040, %eax # imm = 0x7F800000 +; X86-NEXT: setl %al +; X86-NEXT: retl ; ; X64-LABEL: isfinite_f: ; X64: # %bb.0: # %entry @@ -343,7 +327,6 @@ define i1 @isfinite_f(float %x) { ; X86-FASTISEL-LABEL: isfinite_f: ; X86-FASTISEL: # %bb.0: # %entry ; X86-FASTISEL-NEXT: pushl %eax -; X86-FASTISEL-NEXT: .cfi_def_cfa_offset 8 ; X86-FASTISEL-NEXT: flds {{[0-9]+}}(%esp) ; X86-FASTISEL-NEXT: fstps (%esp) ; X86-FASTISEL-NEXT: movl $2147483647, %eax # imm = 0x7FFFFFFF @@ -351,21 +334,20 @@ define i1 @isfinite_f(float %x) { ; X86-FASTISEL-NEXT: cmpl $2139095040, %eax # imm = 0x7F800000 ; X86-FASTISEL-NEXT: setl %al ; X86-FASTISEL-NEXT: popl %ecx -; X86-FASTISEL-NEXT: .cfi_def_cfa_offset 4 ; X86-FASTISEL-NEXT: retl entry: %0 = tail call i1 @llvm.is.fpclass.f32(float %x, i32 504) ; 0x1f8 = "finite" ret i1 %0 } -define i1 @not_isfinite_f(float %x) { -; X86-SDAGISEL-LABEL: not_isfinite_f: -; X86-SDAGISEL: # %bb.0: # %entry -; X86-SDAGISEL-NEXT: movl $2147483647, %eax # imm = 0x7FFFFFFF -; X86-SDAGISEL-NEXT: andl {{[0-9]+}}(%esp), %eax -; X86-SDAGISEL-NEXT: cmpl $2139095040, %eax # imm = 0x7F800000 -; X86-SDAGISEL-NEXT: setge %al -; X86-SDAGISEL-NEXT: retl +define i1 @not_isfinite_f(float %x) nounwind { +; X86-LABEL: not_isfinite_f: +; X86: # %bb.0: # %entry +; X86-NEXT: movl $2147483647, %eax # imm = 0x7FFFFFFF +; X86-NEXT: andl {{[0-9]+}}(%esp), %eax +; X86-NEXT: cmpl $2139095040, %eax # imm = 0x7F800000 +; X86-NEXT: setge %al +; X86-NEXT: retl ; ; X64-LABEL: not_isfinite_f: ; X64: # %bb.0: # %entry @@ -378,7 +360,6 @@ define i1 @not_isfinite_f(float %x) { ; X86-FASTISEL-LABEL: not_isfinite_f: ; X86-FASTISEL: # %bb.0: # %entry ; X86-FASTISEL-NEXT: pushl %eax -; X86-FASTISEL-NEXT: .cfi_def_cfa_offset 8 ; X86-FASTISEL-NEXT: flds {{[0-9]+}}(%esp) ; X86-FASTISEL-NEXT: fstps (%esp) ; X86-FASTISEL-NEXT: movl $2147483647, %eax # imm = 0x7FFFFFFF @@ -386,19 +367,18 @@ define i1 @not_isfinite_f(float %x) { ; X86-FASTISEL-NEXT: cmpl $2139095040, %eax # imm = 0x7F800000 ; X86-FASTISEL-NEXT: setge %al ; X86-FASTISEL-NEXT: popl %ecx -; X86-FASTISEL-NEXT: .cfi_def_cfa_offset 4 ; X86-FASTISEL-NEXT: retl entry: %0 = tail call i1 @llvm.is.fpclass.f32(float %x, i32 519) ; ~0x1f8 = "~finite" ret i1 %0 } -define i1 @is_plus_finite_f(float %x) { -; X86-SDAGISEL-LABEL: is_plus_finite_f: -; X86-SDAGISEL: # %bb.0: # %entry -; X86-SDAGISEL-NEXT: cmpl $2139095040, {{[0-9]+}}(%esp) # imm = 0x7F800000 -; X86-SDAGISEL-NEXT: setb %al -; X86-SDAGISEL-NEXT: retl +define i1 @is_plus_finite_f(float %x) nounwind { +; X86-LABEL: is_plus_finite_f: +; X86: # %bb.0: # %entry +; X86-NEXT: cmpl $2139095040, {{[0-9]+}}(%esp) 
# imm = 0x7F800000 +; X86-NEXT: setb %al +; X86-NEXT: retl ; ; X64-LABEL: is_plus_finite_f: ; X64: # %bb.0: # %entry @@ -410,13 +390,11 @@ define i1 @is_plus_finite_f(float %x) { ; X86-FASTISEL-LABEL: is_plus_finite_f: ; X86-FASTISEL: # %bb.0: # %entry ; X86-FASTISEL-NEXT: pushl %eax -; X86-FASTISEL-NEXT: .cfi_def_cfa_offset 8 ; X86-FASTISEL-NEXT: flds {{[0-9]+}}(%esp) ; X86-FASTISEL-NEXT: fstps (%esp) ; X86-FASTISEL-NEXT: cmpl $2139095040, (%esp) # imm = 0x7F800000 ; X86-FASTISEL-NEXT: setb %al ; X86-FASTISEL-NEXT: popl %ecx -; X86-FASTISEL-NEXT: .cfi_def_cfa_offset 4 ; X86-FASTISEL-NEXT: retl entry: %0 = tail call i1 @llvm.is.fpclass.f32(float %x, i32 448) ; 0x1c0 = "+finite" @@ -424,10 +402,10 @@ entry: } define i1 @isnone_d(double %x) nounwind { -; X86-SDAGISEL-LABEL: isnone_d: -; X86-SDAGISEL: # %bb.0: # %entry -; X86-SDAGISEL-NEXT: xorl %eax, %eax -; X86-SDAGISEL-NEXT: retl +; X86-LABEL: isnone_d: +; X86: # %bb.0: # %entry +; X86-NEXT: xorl %eax, %eax +; X86-NEXT: retl ; ; X64-LABEL: isnone_d: ; X64: # %bb.0: # %entry @@ -446,10 +424,10 @@ entry: } define i1 @isany_d(double %x) nounwind { -; X86-SDAGISEL-LABEL: isany_d: -; X86-SDAGISEL: # %bb.0: # %entry -; X86-SDAGISEL-NEXT: movb $1, %al -; X86-SDAGISEL-NEXT: retl +; X86-LABEL: isany_d: +; X86: # %bb.0: # %entry +; X86-NEXT: movb $1, %al +; X86-NEXT: retl ; ; X64-LABEL: isany_d: ; X64: # %bb.0: # %entry @@ -468,10 +446,10 @@ entry: } define i1 @isnone_f80(x86_fp80 %x) nounwind { -; X86-SDAGISEL-LABEL: isnone_f80: -; X86-SDAGISEL: # %bb.0: # %entry -; X86-SDAGISEL-NEXT: xorl %eax, %eax -; X86-SDAGISEL-NEXT: retl +; X86-LABEL: isnone_f80: +; X86: # %bb.0: # %entry +; X86-NEXT: xorl %eax, %eax +; X86-NEXT: retl ; ; X64-SDAGISEL-LABEL: isnone_f80: ; X64-SDAGISEL: # %bb.0: # %entry @@ -491,16 +469,21 @@ define i1 @isnone_f80(x86_fp80 %x) nounwind { ; X64-FASTISEL-NEXT: fstp %st(0) ; X64-FASTISEL-NEXT: xorl %eax, %eax ; X64-FASTISEL-NEXT: retq +; +; X64-GISEL-LABEL: isnone_f80: +; X64-GISEL: # %bb.0: # %entry +; X64-GISEL-NEXT: xorl %eax, %eax +; X64-GISEL-NEXT: retq entry: %0 = tail call i1 @llvm.is.fpclass.f80(x86_fp80 %x, i32 0) ret i1 %0 } define i1 @isany_f80(x86_fp80 %x) nounwind { -; X86-SDAGISEL-LABEL: isany_f80: -; X86-SDAGISEL: # %bb.0: # %entry -; X86-SDAGISEL-NEXT: movb $1, %al -; X86-SDAGISEL-NEXT: retl +; X86-LABEL: isany_f80: +; X86: # %bb.0: # %entry +; X86-NEXT: movb $1, %al +; X86-NEXT: retl ; ; X64-SDAGISEL-LABEL: isany_f80: ; X64-SDAGISEL: # %bb.0: # %entry @@ -520,6 +503,11 @@ define i1 @isany_f80(x86_fp80 %x) nounwind { ; X64-FASTISEL-NEXT: fstp %st(0) ; X64-FASTISEL-NEXT: movb $1, %al ; X64-FASTISEL-NEXT: retq +; +; X64-GISEL-LABEL: isany_f80: +; X64-GISEL: # %bb.0: # %entry +; X64-GISEL-NEXT: movb $1, %al +; X64-GISEL-NEXT: retq entry: %0 = tail call i1 @llvm.is.fpclass.f80(x86_fp80 %x, i32 1023) ret i1 %0 diff --git a/llvm/test/CodeGen/X86/isel-smax.ll b/llvm/test/CodeGen/X86/isel-smax.ll index 9c9a48e..1ce0a80 100644 --- a/llvm/test/CodeGen/X86/isel-smax.ll +++ b/llvm/test/CodeGen/X86/isel-smax.ll @@ -1,19 +1,19 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 -; RUN: llc < %s -mtriple=x86_64-linux-gnu | FileCheck %s --check-prefixes=X64 -; RUN: llc < %s -mtriple=x86_64-linux-gnu -fast-isel | FileCheck %s --check-prefixes=FASTISEL-X64 -; RUN: llc < %s -mtriple=x86_64-linux-gnu -global-isel -global-isel-abort=2 | FileCheck %s --check-prefixes=X64 -; RUN: llc < %s -mtriple=i686-linux-gnu | FileCheck %s --check-prefixes=X86 -; RUN: llc < %s 
-mtriple=i686-linux-gnu -fast-isel | FileCheck %s --check-prefixes=FASTISEL-X86 -; RUN: llc < %s -mtriple=i686-linux-gnu -global-isel -global-isel-abort=2 | FileCheck %s --check-prefixes=X86 +; RUN: llc < %s -mtriple=x86_64-linux-gnu | FileCheck %s --check-prefixes=X64,DAG-X64 +; RUN: llc < %s -mtriple=x86_64-linux-gnu -fast-isel | FileCheck %s --check-prefixes=X64,FASTISEL-X64 +; RUN: llc < %s -mtriple=x86_64-linux-gnu -global-isel -global-isel-abort=1 | FileCheck %s --check-prefixes=GISEL-X64 +; RUN: llc < %s -mtriple=i686-linux-gnu | FileCheck %s --check-prefixes=X86,DAG-X86 +; RUN: llc < %s -mtriple=i686-linux-gnu -fast-isel | FileCheck %s --check-prefixes=X86,FASTISEL-X86 +; RUN: llc < %s -mtriple=i686-linux-gnu -global-isel -global-isel-abort=1 | FileCheck %s --check-prefixes=GISEL-X86 define i8 @smax_i8(i8 %a, i8 %b) nounwind readnone { -; X64-LABEL: smax_i8: -; X64: # %bb.0: -; X64-NEXT: movl %esi, %eax -; X64-NEXT: cmpb %al, %dil -; X64-NEXT: cmovgl %edi, %eax -; X64-NEXT: # kill: def $al killed $al killed $eax -; X64-NEXT: retq +; DAG-X64-LABEL: smax_i8: +; DAG-X64: # %bb.0: +; DAG-X64-NEXT: movl %esi, %eax +; DAG-X64-NEXT: cmpb %al, %dil +; DAG-X64-NEXT: cmovgl %edi, %eax +; DAG-X64-NEXT: # kill: def $al killed $al killed $eax +; DAG-X64-NEXT: retq ; ; FASTISEL-X64-LABEL: smax_i8: ; FASTISEL-X64: # %bb.0: @@ -24,6 +24,17 @@ define i8 @smax_i8(i8 %a, i8 %b) nounwind readnone { ; FASTISEL-X64-NEXT: # kill: def $al killed $al killed $eax ; FASTISEL-X64-NEXT: retq ; +; GISEL-X64-LABEL: smax_i8: +; GISEL-X64: # %bb.0: +; GISEL-X64-NEXT: movl %esi, %eax +; GISEL-X64-NEXT: xorl %ecx, %ecx +; GISEL-X64-NEXT: cmpb %al, %dil +; GISEL-X64-NEXT: setg %cl +; GISEL-X64-NEXT: andl $1, %ecx +; GISEL-X64-NEXT: cmovnew %di, %ax +; GISEL-X64-NEXT: # kill: def $al killed $al killed $eax +; GISEL-X64-NEXT: retq +; ; X86-LABEL: smax_i8: ; X86: # %bb.0: ; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx @@ -35,16 +46,20 @@ define i8 @smax_i8(i8 %a, i8 %b) nounwind readnone { ; X86-NEXT: .LBB0_2: ; X86-NEXT: retl ; -; FASTISEL-X86-LABEL: smax_i8: -; FASTISEL-X86: # %bb.0: -; FASTISEL-X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx -; FASTISEL-X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax -; FASTISEL-X86-NEXT: cmpb %cl, %al -; FASTISEL-X86-NEXT: jg .LBB0_2 -; FASTISEL-X86-NEXT: # %bb.1: -; FASTISEL-X86-NEXT: movl %ecx, %eax -; FASTISEL-X86-NEXT: .LBB0_2: -; FASTISEL-X86-NEXT: retl +; GISEL-X86-LABEL: smax_i8: +; GISEL-X86: # %bb.0: +; GISEL-X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx +; GISEL-X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax +; GISEL-X86-NEXT: xorl %edx, %edx +; GISEL-X86-NEXT: cmpb %al, %cl +; GISEL-X86-NEXT: setg %dl +; GISEL-X86-NEXT: andl $1, %edx +; GISEL-X86-NEXT: je .LBB0_2 +; GISEL-X86-NEXT: # %bb.1: +; GISEL-X86-NEXT: movl %ecx, %eax +; GISEL-X86-NEXT: .LBB0_2: +; GISEL-X86-NEXT: # kill: def $al killed $al killed $eax +; GISEL-X86-NEXT: retl %ret = call i8 @llvm.smax.i8(i8 %a, i8 %b) ret i8 %ret } @@ -57,25 +72,28 @@ define i16 @smax_i16(i16 %a, i16 %b) nounwind readnone { ; X64-NEXT: # kill: def $ax killed $ax killed $eax ; X64-NEXT: retq ; -; FASTISEL-X64-LABEL: smax_i16: -; FASTISEL-X64: # %bb.0: -; FASTISEL-X64-NEXT: movl %esi, %eax -; FASTISEL-X64-NEXT: cmpw %ax, %di -; FASTISEL-X64-NEXT: cmovgl %edi, %eax -; FASTISEL-X64-NEXT: # kill: def $ax killed $ax killed $eax -; FASTISEL-X64-NEXT: retq +; GISEL-X64-LABEL: smax_i16: +; GISEL-X64: # %bb.0: +; GISEL-X64-NEXT: movl %edi, %eax +; GISEL-X64-NEXT: xorl %ecx, %ecx +; GISEL-X64-NEXT: cmpw %si, %ax +; GISEL-X64-NEXT: setg %cl +; GISEL-X64-NEXT: andl $1, %ecx +; 
GISEL-X64-NEXT: cmovew %si, %ax +; GISEL-X64-NEXT: # kill: def $ax killed $ax killed $eax +; GISEL-X64-NEXT: retq ; -; X86-LABEL: smax_i16: -; X86: # %bb.0: -; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx -; X86-NEXT: movl {{[0-9]+}}(%esp), %eax -; X86-NEXT: cmpw %cx, %ax -; X86-NEXT: jg .LBB1_2 -; X86-NEXT: # %bb.1: -; X86-NEXT: movl %ecx, %eax -; X86-NEXT: .LBB1_2: -; X86-NEXT: # kill: def $ax killed $ax killed $eax -; X86-NEXT: retl +; DAG-X86-LABEL: smax_i16: +; DAG-X86: # %bb.0: +; DAG-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx +; DAG-X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; DAG-X86-NEXT: cmpw %cx, %ax +; DAG-X86-NEXT: jg .LBB1_2 +; DAG-X86-NEXT: # %bb.1: +; DAG-X86-NEXT: movl %ecx, %eax +; DAG-X86-NEXT: .LBB1_2: +; DAG-X86-NEXT: # kill: def $ax killed $ax killed $eax +; DAG-X86-NEXT: retl ; ; FASTISEL-X86-LABEL: smax_i16: ; FASTISEL-X86: # %bb.0: @@ -88,6 +106,21 @@ define i16 @smax_i16(i16 %a, i16 %b) nounwind readnone { ; FASTISEL-X86-NEXT: .LBB1_2: ; FASTISEL-X86-NEXT: # kill: def $ax killed $ax killed $eax ; FASTISEL-X86-NEXT: retl +; +; GISEL-X86-LABEL: smax_i16: +; GISEL-X86: # %bb.0: +; GISEL-X86-NEXT: movzwl {{[0-9]+}}(%esp), %ecx +; GISEL-X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax +; GISEL-X86-NEXT: xorl %edx, %edx +; GISEL-X86-NEXT: cmpw %ax, %cx +; GISEL-X86-NEXT: setg %dl +; GISEL-X86-NEXT: andl $1, %edx +; GISEL-X86-NEXT: je .LBB1_2 +; GISEL-X86-NEXT: # %bb.1: +; GISEL-X86-NEXT: movl %ecx, %eax +; GISEL-X86-NEXT: .LBB1_2: +; GISEL-X86-NEXT: # kill: def $ax killed $ax killed $eax +; GISEL-X86-NEXT: retl %ret = call i16 @llvm.smax.i16(i16 %a, i16 %b) ret i16 %ret } @@ -99,12 +132,15 @@ define i32 @smax_i32(i32 %a, i32 %b) nounwind readnone { ; X64-NEXT: cmovgl %edi, %eax ; X64-NEXT: retq ; -; FASTISEL-X64-LABEL: smax_i32: -; FASTISEL-X64: # %bb.0: -; FASTISEL-X64-NEXT: movl %esi, %eax -; FASTISEL-X64-NEXT: cmpl %esi, %edi -; FASTISEL-X64-NEXT: cmovgl %edi, %eax -; FASTISEL-X64-NEXT: retq +; GISEL-X64-LABEL: smax_i32: +; GISEL-X64: # %bb.0: +; GISEL-X64-NEXT: movl %edi, %eax +; GISEL-X64-NEXT: xorl %ecx, %ecx +; GISEL-X64-NEXT: cmpl %esi, %edi +; GISEL-X64-NEXT: setg %cl +; GISEL-X64-NEXT: andl $1, %ecx +; GISEL-X64-NEXT: cmovel %esi, %eax +; GISEL-X64-NEXT: retq ; ; X86-LABEL: smax_i32: ; X86: # %bb.0: @@ -117,16 +153,19 @@ define i32 @smax_i32(i32 %a, i32 %b) nounwind readnone { ; X86-NEXT: .LBB2_2: ; X86-NEXT: retl ; -; FASTISEL-X86-LABEL: smax_i32: -; FASTISEL-X86: # %bb.0: -; FASTISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx -; FASTISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %eax -; FASTISEL-X86-NEXT: cmpl %ecx, %eax -; FASTISEL-X86-NEXT: jg .LBB2_2 -; FASTISEL-X86-NEXT: # %bb.1: -; FASTISEL-X86-NEXT: movl %ecx, %eax -; FASTISEL-X86-NEXT: .LBB2_2: -; FASTISEL-X86-NEXT: retl +; GISEL-X86-LABEL: smax_i32: +; GISEL-X86: # %bb.0: +; GISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx +; GISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; GISEL-X86-NEXT: xorl %edx, %edx +; GISEL-X86-NEXT: cmpl %eax, %ecx +; GISEL-X86-NEXT: setg %dl +; GISEL-X86-NEXT: andl $1, %edx +; GISEL-X86-NEXT: je .LBB2_2 +; GISEL-X86-NEXT: # %bb.1: +; GISEL-X86-NEXT: movl %ecx, %eax +; GISEL-X86-NEXT: .LBB2_2: +; GISEL-X86-NEXT: retl %ret = call i32 @llvm.smax.i32(i32 %a, i32 %b) ret i32 %ret } @@ -138,32 +177,35 @@ define i64 @smax_i64(i64 %a, i64 %b) nounwind readnone { ; X64-NEXT: cmovgq %rdi, %rax ; X64-NEXT: retq ; -; FASTISEL-X64-LABEL: smax_i64: -; FASTISEL-X64: # %bb.0: -; FASTISEL-X64-NEXT: movq %rsi, %rax -; FASTISEL-X64-NEXT: cmpq %rsi, %rdi -; FASTISEL-X64-NEXT: cmovgq %rdi, %rax -; FASTISEL-X64-NEXT: retq +; GISEL-X64-LABEL: 
smax_i64: +; GISEL-X64: # %bb.0: +; GISEL-X64-NEXT: movq %rdi, %rax +; GISEL-X64-NEXT: xorl %ecx, %ecx +; GISEL-X64-NEXT: cmpq %rsi, %rdi +; GISEL-X64-NEXT: setg %cl +; GISEL-X64-NEXT: andl $1, %ecx +; GISEL-X64-NEXT: cmoveq %rsi, %rax +; GISEL-X64-NEXT: retq ; -; X86-LABEL: smax_i64: -; X86: # %bb.0: -; X86-NEXT: pushl %edi -; X86-NEXT: pushl %esi -; X86-NEXT: movl {{[0-9]+}}(%esp), %eax -; X86-NEXT: movl {{[0-9]+}}(%esp), %edx -; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx -; X86-NEXT: movl {{[0-9]+}}(%esp), %esi -; X86-NEXT: cmpl %eax, %ecx -; X86-NEXT: movl %esi, %edi -; X86-NEXT: sbbl %edx, %edi -; X86-NEXT: jl .LBB3_2 -; X86-NEXT: # %bb.1: -; X86-NEXT: movl %ecx, %eax -; X86-NEXT: movl %esi, %edx -; X86-NEXT: .LBB3_2: -; X86-NEXT: popl %esi -; X86-NEXT: popl %edi -; X86-NEXT: retl +; DAG-X86-LABEL: smax_i64: +; DAG-X86: # %bb.0: +; DAG-X86-NEXT: pushl %edi +; DAG-X86-NEXT: pushl %esi +; DAG-X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; DAG-X86-NEXT: movl {{[0-9]+}}(%esp), %edx +; DAG-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx +; DAG-X86-NEXT: movl {{[0-9]+}}(%esp), %esi +; DAG-X86-NEXT: cmpl %eax, %ecx +; DAG-X86-NEXT: movl %esi, %edi +; DAG-X86-NEXT: sbbl %edx, %edi +; DAG-X86-NEXT: jl .LBB3_2 +; DAG-X86-NEXT: # %bb.1: +; DAG-X86-NEXT: movl %ecx, %eax +; DAG-X86-NEXT: movl %esi, %edx +; DAG-X86-NEXT: .LBB3_2: +; DAG-X86-NEXT: popl %esi +; DAG-X86-NEXT: popl %edi +; DAG-X86-NEXT: retl ; ; FASTISEL-X86-LABEL: smax_i64: ; FASTISEL-X86: # %bb.0: @@ -184,6 +226,44 @@ define i64 @smax_i64(i64 %a, i64 %b) nounwind readnone { ; FASTISEL-X86-NEXT: popl %esi ; FASTISEL-X86-NEXT: popl %edi ; FASTISEL-X86-NEXT: retl +; +; GISEL-X86-LABEL: smax_i64: +; GISEL-X86: # %bb.0: +; GISEL-X86-NEXT: pushl %ebp +; GISEL-X86-NEXT: pushl %ebx +; GISEL-X86-NEXT: pushl %edi +; GISEL-X86-NEXT: pushl %esi +; GISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %esi +; GISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %ebp +; GISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; GISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %edx +; GISEL-X86-NEXT: cmpl %eax, %esi +; GISEL-X86-NEXT: seta %bl +; GISEL-X86-NEXT: xorl %ecx, %ecx +; GISEL-X86-NEXT: cmpl %edx, %ebp +; GISEL-X86-NEXT: setg %bh +; GISEL-X86-NEXT: sete %cl +; GISEL-X86-NEXT: testl %ecx, %ecx +; GISEL-X86-NEXT: je .LBB3_2 +; GISEL-X86-NEXT: # %bb.1: +; GISEL-X86-NEXT: movb %bl, %bh +; GISEL-X86-NEXT: .LBB3_2: +; GISEL-X86-NEXT: movzbl %bh, %edi +; GISEL-X86-NEXT: andl $1, %edi +; GISEL-X86-NEXT: je .LBB3_4 +; GISEL-X86-NEXT: # %bb.3: +; GISEL-X86-NEXT: movl %esi, %eax +; GISEL-X86-NEXT: .LBB3_4: +; GISEL-X86-NEXT: testl %edi, %edi +; GISEL-X86-NEXT: je .LBB3_6 +; GISEL-X86-NEXT: # %bb.5: +; GISEL-X86-NEXT: movl %ebp, %edx +; GISEL-X86-NEXT: .LBB3_6: +; GISEL-X86-NEXT: popl %esi +; GISEL-X86-NEXT: popl %edi +; GISEL-X86-NEXT: popl %ebx +; GISEL-X86-NEXT: popl %ebp +; GISEL-X86-NEXT: retl %ret = call i64 @llvm.smax.i64(i64 %a, i64 %b) ret i64 %ret } diff --git a/llvm/test/CodeGen/X86/isel-smin.ll b/llvm/test/CodeGen/X86/isel-smin.ll index 7349a7c..bbed3c3 100644 --- a/llvm/test/CodeGen/X86/isel-smin.ll +++ b/llvm/test/CodeGen/X86/isel-smin.ll @@ -1,19 +1,19 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 -; RUN: llc < %s -mtriple=x86_64-linux-gnu | FileCheck %s --check-prefixes=X64 -; RUN: llc < %s -mtriple=x86_64-linux-gnu -fast-isel | FileCheck %s --check-prefixes=FASTISEL-X64 -; RUN: llc < %s -mtriple=x86_64-linux-gnu -global-isel -global-isel-abort=2 | FileCheck %s --check-prefixes=X64 -; RUN: llc < %s -mtriple=i686-linux-gnu | FileCheck %s 
--check-prefixes=X86 -; RUN: llc < %s -mtriple=i686-linux-gnu -fast-isel | FileCheck %s --check-prefixes=FASTISEL-X86 -; RUN: llc < %s -mtriple=i686-linux-gnu -global-isel -global-isel-abort=2 | FileCheck %s --check-prefixes=X86 +; RUN: llc < %s -mtriple=x86_64-linux-gnu | FileCheck %s --check-prefixes=X64,DAG-X64 +; RUN: llc < %s -mtriple=x86_64-linux-gnu -fast-isel | FileCheck %s --check-prefixes=X64,FASTISEL-X64 +; RUN: llc < %s -mtriple=x86_64-linux-gnu -global-isel -global-isel-abort=1 | FileCheck %s --check-prefixes=GISEL-X64 +; RUN: llc < %s -mtriple=i686-linux-gnu | FileCheck %s --check-prefixes=X86,DAG-X86 +; RUN: llc < %s -mtriple=i686-linux-gnu -fast-isel | FileCheck %s --check-prefixes=X86,FASTISEL-X86 +; RUN: llc < %s -mtriple=i686-linux-gnu -global-isel -global-isel-abort=1 | FileCheck %s --check-prefixes=GISEL-X86 define i8 @smin_i8(i8 %a, i8 %b) nounwind readnone { -; X64-LABEL: smin_i8: -; X64: # %bb.0: -; X64-NEXT: movl %esi, %eax -; X64-NEXT: cmpb %al, %dil -; X64-NEXT: cmovll %edi, %eax -; X64-NEXT: # kill: def $al killed $al killed $eax -; X64-NEXT: retq +; DAG-X64-LABEL: smin_i8: +; DAG-X64: # %bb.0: +; DAG-X64-NEXT: movl %esi, %eax +; DAG-X64-NEXT: cmpb %al, %dil +; DAG-X64-NEXT: cmovll %edi, %eax +; DAG-X64-NEXT: # kill: def $al killed $al killed $eax +; DAG-X64-NEXT: retq ; ; FASTISEL-X64-LABEL: smin_i8: ; FASTISEL-X64: # %bb.0: @@ -24,6 +24,17 @@ define i8 @smin_i8(i8 %a, i8 %b) nounwind readnone { ; FASTISEL-X64-NEXT: # kill: def $al killed $al killed $eax ; FASTISEL-X64-NEXT: retq ; +; GISEL-X64-LABEL: smin_i8: +; GISEL-X64: # %bb.0: +; GISEL-X64-NEXT: movl %esi, %eax +; GISEL-X64-NEXT: xorl %ecx, %ecx +; GISEL-X64-NEXT: cmpb %al, %dil +; GISEL-X64-NEXT: setl %cl +; GISEL-X64-NEXT: andl $1, %ecx +; GISEL-X64-NEXT: cmovnew %di, %ax +; GISEL-X64-NEXT: # kill: def $al killed $al killed $eax +; GISEL-X64-NEXT: retq +; ; X86-LABEL: smin_i8: ; X86: # %bb.0: ; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx @@ -35,16 +46,20 @@ define i8 @smin_i8(i8 %a, i8 %b) nounwind readnone { ; X86-NEXT: .LBB0_2: ; X86-NEXT: retl ; -; FASTISEL-X86-LABEL: smin_i8: -; FASTISEL-X86: # %bb.0: -; FASTISEL-X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx -; FASTISEL-X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax -; FASTISEL-X86-NEXT: cmpb %cl, %al -; FASTISEL-X86-NEXT: jl .LBB0_2 -; FASTISEL-X86-NEXT: # %bb.1: -; FASTISEL-X86-NEXT: movl %ecx, %eax -; FASTISEL-X86-NEXT: .LBB0_2: -; FASTISEL-X86-NEXT: retl +; GISEL-X86-LABEL: smin_i8: +; GISEL-X86: # %bb.0: +; GISEL-X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx +; GISEL-X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax +; GISEL-X86-NEXT: xorl %edx, %edx +; GISEL-X86-NEXT: cmpb %al, %cl +; GISEL-X86-NEXT: setl %dl +; GISEL-X86-NEXT: andl $1, %edx +; GISEL-X86-NEXT: je .LBB0_2 +; GISEL-X86-NEXT: # %bb.1: +; GISEL-X86-NEXT: movl %ecx, %eax +; GISEL-X86-NEXT: .LBB0_2: +; GISEL-X86-NEXT: # kill: def $al killed $al killed $eax +; GISEL-X86-NEXT: retl %ret = call i8 @llvm.smin.i8(i8 %a, i8 %b) ret i8 %ret } @@ -57,25 +72,28 @@ define i16 @smin_i16(i16 %a, i16 %b) nounwind readnone { ; X64-NEXT: # kill: def $ax killed $ax killed $eax ; X64-NEXT: retq ; -; FASTISEL-X64-LABEL: smin_i16: -; FASTISEL-X64: # %bb.0: -; FASTISEL-X64-NEXT: movl %esi, %eax -; FASTISEL-X64-NEXT: cmpw %ax, %di -; FASTISEL-X64-NEXT: cmovll %edi, %eax -; FASTISEL-X64-NEXT: # kill: def $ax killed $ax killed $eax -; FASTISEL-X64-NEXT: retq +; GISEL-X64-LABEL: smin_i16: +; GISEL-X64: # %bb.0: +; GISEL-X64-NEXT: movl %edi, %eax +; GISEL-X64-NEXT: xorl %ecx, %ecx +; GISEL-X64-NEXT: cmpw %si, %ax +; GISEL-X64-NEXT: setl 
%cl +; GISEL-X64-NEXT: andl $1, %ecx +; GISEL-X64-NEXT: cmovew %si, %ax +; GISEL-X64-NEXT: # kill: def $ax killed $ax killed $eax +; GISEL-X64-NEXT: retq ; -; X86-LABEL: smin_i16: -; X86: # %bb.0: -; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx -; X86-NEXT: movl {{[0-9]+}}(%esp), %eax -; X86-NEXT: cmpw %cx, %ax -; X86-NEXT: jl .LBB1_2 -; X86-NEXT: # %bb.1: -; X86-NEXT: movl %ecx, %eax -; X86-NEXT: .LBB1_2: -; X86-NEXT: # kill: def $ax killed $ax killed $eax -; X86-NEXT: retl +; DAG-X86-LABEL: smin_i16: +; DAG-X86: # %bb.0: +; DAG-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx +; DAG-X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; DAG-X86-NEXT: cmpw %cx, %ax +; DAG-X86-NEXT: jl .LBB1_2 +; DAG-X86-NEXT: # %bb.1: +; DAG-X86-NEXT: movl %ecx, %eax +; DAG-X86-NEXT: .LBB1_2: +; DAG-X86-NEXT: # kill: def $ax killed $ax killed $eax +; DAG-X86-NEXT: retl ; ; FASTISEL-X86-LABEL: smin_i16: ; FASTISEL-X86: # %bb.0: @@ -88,6 +106,21 @@ define i16 @smin_i16(i16 %a, i16 %b) nounwind readnone { ; FASTISEL-X86-NEXT: .LBB1_2: ; FASTISEL-X86-NEXT: # kill: def $ax killed $ax killed $eax ; FASTISEL-X86-NEXT: retl +; +; GISEL-X86-LABEL: smin_i16: +; GISEL-X86: # %bb.0: +; GISEL-X86-NEXT: movzwl {{[0-9]+}}(%esp), %ecx +; GISEL-X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax +; GISEL-X86-NEXT: xorl %edx, %edx +; GISEL-X86-NEXT: cmpw %ax, %cx +; GISEL-X86-NEXT: setl %dl +; GISEL-X86-NEXT: andl $1, %edx +; GISEL-X86-NEXT: je .LBB1_2 +; GISEL-X86-NEXT: # %bb.1: +; GISEL-X86-NEXT: movl %ecx, %eax +; GISEL-X86-NEXT: .LBB1_2: +; GISEL-X86-NEXT: # kill: def $ax killed $ax killed $eax +; GISEL-X86-NEXT: retl %ret = call i16 @llvm.smin.i16(i16 %a, i16 %b) ret i16 %ret } @@ -99,12 +132,15 @@ define i32 @smin_i32(i32 %a, i32 %b) nounwind readnone { ; X64-NEXT: cmovll %edi, %eax ; X64-NEXT: retq ; -; FASTISEL-X64-LABEL: smin_i32: -; FASTISEL-X64: # %bb.0: -; FASTISEL-X64-NEXT: movl %esi, %eax -; FASTISEL-X64-NEXT: cmpl %esi, %edi -; FASTISEL-X64-NEXT: cmovll %edi, %eax -; FASTISEL-X64-NEXT: retq +; GISEL-X64-LABEL: smin_i32: +; GISEL-X64: # %bb.0: +; GISEL-X64-NEXT: movl %edi, %eax +; GISEL-X64-NEXT: xorl %ecx, %ecx +; GISEL-X64-NEXT: cmpl %esi, %edi +; GISEL-X64-NEXT: setl %cl +; GISEL-X64-NEXT: andl $1, %ecx +; GISEL-X64-NEXT: cmovel %esi, %eax +; GISEL-X64-NEXT: retq ; ; X86-LABEL: smin_i32: ; X86: # %bb.0: @@ -117,16 +153,19 @@ define i32 @smin_i32(i32 %a, i32 %b) nounwind readnone { ; X86-NEXT: .LBB2_2: ; X86-NEXT: retl ; -; FASTISEL-X86-LABEL: smin_i32: -; FASTISEL-X86: # %bb.0: -; FASTISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx -; FASTISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %eax -; FASTISEL-X86-NEXT: cmpl %ecx, %eax -; FASTISEL-X86-NEXT: jl .LBB2_2 -; FASTISEL-X86-NEXT: # %bb.1: -; FASTISEL-X86-NEXT: movl %ecx, %eax -; FASTISEL-X86-NEXT: .LBB2_2: -; FASTISEL-X86-NEXT: retl +; GISEL-X86-LABEL: smin_i32: +; GISEL-X86: # %bb.0: +; GISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx +; GISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; GISEL-X86-NEXT: xorl %edx, %edx +; GISEL-X86-NEXT: cmpl %eax, %ecx +; GISEL-X86-NEXT: setl %dl +; GISEL-X86-NEXT: andl $1, %edx +; GISEL-X86-NEXT: je .LBB2_2 +; GISEL-X86-NEXT: # %bb.1: +; GISEL-X86-NEXT: movl %ecx, %eax +; GISEL-X86-NEXT: .LBB2_2: +; GISEL-X86-NEXT: retl %ret = call i32 @llvm.smin.i32(i32 %a, i32 %b) ret i32 %ret } @@ -138,32 +177,35 @@ define i64 @smin_i64(i64 %a, i64 %b) nounwind readnone { ; X64-NEXT: cmovlq %rdi, %rax ; X64-NEXT: retq ; -; FASTISEL-X64-LABEL: smin_i64: -; FASTISEL-X64: # %bb.0: -; FASTISEL-X64-NEXT: movq %rsi, %rax -; FASTISEL-X64-NEXT: cmpq %rsi, %rdi -; FASTISEL-X64-NEXT: cmovlq %rdi, %rax -; 
FASTISEL-X64-NEXT: retq +; GISEL-X64-LABEL: smin_i64: +; GISEL-X64: # %bb.0: +; GISEL-X64-NEXT: movq %rdi, %rax +; GISEL-X64-NEXT: xorl %ecx, %ecx +; GISEL-X64-NEXT: cmpq %rsi, %rdi +; GISEL-X64-NEXT: setl %cl +; GISEL-X64-NEXT: andl $1, %ecx +; GISEL-X64-NEXT: cmoveq %rsi, %rax +; GISEL-X64-NEXT: retq ; -; X86-LABEL: smin_i64: -; X86: # %bb.0: -; X86-NEXT: pushl %edi -; X86-NEXT: pushl %esi -; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx -; X86-NEXT: movl {{[0-9]+}}(%esp), %esi -; X86-NEXT: movl {{[0-9]+}}(%esp), %eax -; X86-NEXT: movl {{[0-9]+}}(%esp), %edx -; X86-NEXT: cmpl %ecx, %eax -; X86-NEXT: movl %edx, %edi -; X86-NEXT: sbbl %esi, %edi -; X86-NEXT: jl .LBB3_2 -; X86-NEXT: # %bb.1: -; X86-NEXT: movl %ecx, %eax -; X86-NEXT: movl %esi, %edx -; X86-NEXT: .LBB3_2: -; X86-NEXT: popl %esi -; X86-NEXT: popl %edi -; X86-NEXT: retl +; DAG-X86-LABEL: smin_i64: +; DAG-X86: # %bb.0: +; DAG-X86-NEXT: pushl %edi +; DAG-X86-NEXT: pushl %esi +; DAG-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx +; DAG-X86-NEXT: movl {{[0-9]+}}(%esp), %esi +; DAG-X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; DAG-X86-NEXT: movl {{[0-9]+}}(%esp), %edx +; DAG-X86-NEXT: cmpl %ecx, %eax +; DAG-X86-NEXT: movl %edx, %edi +; DAG-X86-NEXT: sbbl %esi, %edi +; DAG-X86-NEXT: jl .LBB3_2 +; DAG-X86-NEXT: # %bb.1: +; DAG-X86-NEXT: movl %ecx, %eax +; DAG-X86-NEXT: movl %esi, %edx +; DAG-X86-NEXT: .LBB3_2: +; DAG-X86-NEXT: popl %esi +; DAG-X86-NEXT: popl %edi +; DAG-X86-NEXT: retl ; ; FASTISEL-X86-LABEL: smin_i64: ; FASTISEL-X86: # %bb.0: @@ -184,6 +226,44 @@ define i64 @smin_i64(i64 %a, i64 %b) nounwind readnone { ; FASTISEL-X86-NEXT: popl %esi ; FASTISEL-X86-NEXT: popl %edi ; FASTISEL-X86-NEXT: retl +; +; GISEL-X86-LABEL: smin_i64: +; GISEL-X86: # %bb.0: +; GISEL-X86-NEXT: pushl %ebp +; GISEL-X86-NEXT: pushl %ebx +; GISEL-X86-NEXT: pushl %edi +; GISEL-X86-NEXT: pushl %esi +; GISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %esi +; GISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %ebp +; GISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; GISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %edx +; GISEL-X86-NEXT: cmpl %eax, %esi +; GISEL-X86-NEXT: setb %bl +; GISEL-X86-NEXT: xorl %ecx, %ecx +; GISEL-X86-NEXT: cmpl %edx, %ebp +; GISEL-X86-NEXT: setl %bh +; GISEL-X86-NEXT: sete %cl +; GISEL-X86-NEXT: testl %ecx, %ecx +; GISEL-X86-NEXT: je .LBB3_2 +; GISEL-X86-NEXT: # %bb.1: +; GISEL-X86-NEXT: movb %bl, %bh +; GISEL-X86-NEXT: .LBB3_2: +; GISEL-X86-NEXT: movzbl %bh, %edi +; GISEL-X86-NEXT: andl $1, %edi +; GISEL-X86-NEXT: je .LBB3_4 +; GISEL-X86-NEXT: # %bb.3: +; GISEL-X86-NEXT: movl %esi, %eax +; GISEL-X86-NEXT: .LBB3_4: +; GISEL-X86-NEXT: testl %edi, %edi +; GISEL-X86-NEXT: je .LBB3_6 +; GISEL-X86-NEXT: # %bb.5: +; GISEL-X86-NEXT: movl %ebp, %edx +; GISEL-X86-NEXT: .LBB3_6: +; GISEL-X86-NEXT: popl %esi +; GISEL-X86-NEXT: popl %edi +; GISEL-X86-NEXT: popl %ebx +; GISEL-X86-NEXT: popl %ebp +; GISEL-X86-NEXT: retl %ret = call i64 @llvm.smin.i64(i64 %a, i64 %b) ret i64 %ret } diff --git a/llvm/test/CodeGen/X86/isel-umax.ll b/llvm/test/CodeGen/X86/isel-umax.ll index a90456c..990af26 100644 --- a/llvm/test/CodeGen/X86/isel-umax.ll +++ b/llvm/test/CodeGen/X86/isel-umax.ll @@ -1,19 +1,19 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 -; RUN: llc < %s -mtriple=x86_64-linux-gnu | FileCheck %s --check-prefixes=X64 -; RUN: llc < %s -mtriple=x86_64-linux-gnu -fast-isel | FileCheck %s --check-prefixes=FASTISEL-X64 -; RUN: llc < %s -mtriple=x86_64-linux-gnu -global-isel -global-isel-abort=2 | FileCheck %s --check-prefixes=X64 -; RUN: llc < %s 
-mtriple=i686-linux-gnu | FileCheck %s --check-prefixes=X86 -; RUN: llc < %s -mtriple=i686-linux-gnu -fast-isel | FileCheck %s --check-prefixes=FASTISEL-X86 -; RUN: llc < %s -mtriple=i686-linux-gnu -global-isel -global-isel-abort=2 | FileCheck %s --check-prefixes=X86 +; RUN: llc < %s -mtriple=x86_64-linux-gnu | FileCheck %s --check-prefixes=X64,DAG-X64 +; RUN: llc < %s -mtriple=x86_64-linux-gnu -fast-isel | FileCheck %s --check-prefixes=X64,FASTISEL-X64 +; RUN: llc < %s -mtriple=x86_64-linux-gnu -global-isel -global-isel-abort=1 | FileCheck %s --check-prefixes=GISEL-X64 +; RUN: llc < %s -mtriple=i686-linux-gnu | FileCheck %s --check-prefixes=X86,DAG-X86 +; RUN: llc < %s -mtriple=i686-linux-gnu -fast-isel | FileCheck %s --check-prefixes=X86,FASTISEL-X86 +; RUN: llc < %s -mtriple=i686-linux-gnu -global-isel -global-isel-abort=1 | FileCheck %s --check-prefixes=GISEL-X86 define i8 @umax_i8(i8 %a, i8 %b) nounwind readnone { -; X64-LABEL: umax_i8: -; X64: # %bb.0: -; X64-NEXT: movl %esi, %eax -; X64-NEXT: cmpb %al, %dil -; X64-NEXT: cmoval %edi, %eax -; X64-NEXT: # kill: def $al killed $al killed $eax -; X64-NEXT: retq +; DAG-X64-LABEL: umax_i8: +; DAG-X64: # %bb.0: +; DAG-X64-NEXT: movl %esi, %eax +; DAG-X64-NEXT: cmpb %al, %dil +; DAG-X64-NEXT: cmoval %edi, %eax +; DAG-X64-NEXT: # kill: def $al killed $al killed $eax +; DAG-X64-NEXT: retq ; ; FASTISEL-X64-LABEL: umax_i8: ; FASTISEL-X64: # %bb.0: @@ -24,6 +24,17 @@ define i8 @umax_i8(i8 %a, i8 %b) nounwind readnone { ; FASTISEL-X64-NEXT: # kill: def $al killed $al killed $eax ; FASTISEL-X64-NEXT: retq ; +; GISEL-X64-LABEL: umax_i8: +; GISEL-X64: # %bb.0: +; GISEL-X64-NEXT: movl %esi, %eax +; GISEL-X64-NEXT: xorl %ecx, %ecx +; GISEL-X64-NEXT: cmpb %al, %dil +; GISEL-X64-NEXT: seta %cl +; GISEL-X64-NEXT: andl $1, %ecx +; GISEL-X64-NEXT: cmovnew %di, %ax +; GISEL-X64-NEXT: # kill: def $al killed $al killed $eax +; GISEL-X64-NEXT: retq +; ; X86-LABEL: umax_i8: ; X86: # %bb.0: ; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx @@ -35,16 +46,20 @@ define i8 @umax_i8(i8 %a, i8 %b) nounwind readnone { ; X86-NEXT: .LBB0_2: ; X86-NEXT: retl ; -; FASTISEL-X86-LABEL: umax_i8: -; FASTISEL-X86: # %bb.0: -; FASTISEL-X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx -; FASTISEL-X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax -; FASTISEL-X86-NEXT: cmpb %cl, %al -; FASTISEL-X86-NEXT: ja .LBB0_2 -; FASTISEL-X86-NEXT: # %bb.1: -; FASTISEL-X86-NEXT: movl %ecx, %eax -; FASTISEL-X86-NEXT: .LBB0_2: -; FASTISEL-X86-NEXT: retl +; GISEL-X86-LABEL: umax_i8: +; GISEL-X86: # %bb.0: +; GISEL-X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx +; GISEL-X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax +; GISEL-X86-NEXT: xorl %edx, %edx +; GISEL-X86-NEXT: cmpb %al, %cl +; GISEL-X86-NEXT: seta %dl +; GISEL-X86-NEXT: andl $1, %edx +; GISEL-X86-NEXT: je .LBB0_2 +; GISEL-X86-NEXT: # %bb.1: +; GISEL-X86-NEXT: movl %ecx, %eax +; GISEL-X86-NEXT: .LBB0_2: +; GISEL-X86-NEXT: # kill: def $al killed $al killed $eax +; GISEL-X86-NEXT: retl %ret = call i8 @llvm.umax.i8(i8 %a, i8 %b) ret i8 %ret } @@ -57,25 +72,28 @@ define i16 @umax_i16(i16 %a, i16 %b) nounwind readnone { ; X64-NEXT: # kill: def $ax killed $ax killed $eax ; X64-NEXT: retq ; -; FASTISEL-X64-LABEL: umax_i16: -; FASTISEL-X64: # %bb.0: -; FASTISEL-X64-NEXT: movl %esi, %eax -; FASTISEL-X64-NEXT: cmpw %ax, %di -; FASTISEL-X64-NEXT: cmoval %edi, %eax -; FASTISEL-X64-NEXT: # kill: def $ax killed $ax killed $eax -; FASTISEL-X64-NEXT: retq +; GISEL-X64-LABEL: umax_i16: +; GISEL-X64: # %bb.0: +; GISEL-X64-NEXT: movl %edi, %eax +; GISEL-X64-NEXT: xorl %ecx, %ecx +; GISEL-X64-NEXT: 
cmpw %si, %ax +; GISEL-X64-NEXT: seta %cl +; GISEL-X64-NEXT: andl $1, %ecx +; GISEL-X64-NEXT: cmovew %si, %ax +; GISEL-X64-NEXT: # kill: def $ax killed $ax killed $eax +; GISEL-X64-NEXT: retq ; -; X86-LABEL: umax_i16: -; X86: # %bb.0: -; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx -; X86-NEXT: movl {{[0-9]+}}(%esp), %eax -; X86-NEXT: cmpw %cx, %ax -; X86-NEXT: ja .LBB1_2 -; X86-NEXT: # %bb.1: -; X86-NEXT: movl %ecx, %eax -; X86-NEXT: .LBB1_2: -; X86-NEXT: # kill: def $ax killed $ax killed $eax -; X86-NEXT: retl +; DAG-X86-LABEL: umax_i16: +; DAG-X86: # %bb.0: +; DAG-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx +; DAG-X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; DAG-X86-NEXT: cmpw %cx, %ax +; DAG-X86-NEXT: ja .LBB1_2 +; DAG-X86-NEXT: # %bb.1: +; DAG-X86-NEXT: movl %ecx, %eax +; DAG-X86-NEXT: .LBB1_2: +; DAG-X86-NEXT: # kill: def $ax killed $ax killed $eax +; DAG-X86-NEXT: retl ; ; FASTISEL-X86-LABEL: umax_i16: ; FASTISEL-X86: # %bb.0: @@ -88,6 +106,21 @@ define i16 @umax_i16(i16 %a, i16 %b) nounwind readnone { ; FASTISEL-X86-NEXT: .LBB1_2: ; FASTISEL-X86-NEXT: # kill: def $ax killed $ax killed $eax ; FASTISEL-X86-NEXT: retl +; +; GISEL-X86-LABEL: umax_i16: +; GISEL-X86: # %bb.0: +; GISEL-X86-NEXT: movzwl {{[0-9]+}}(%esp), %ecx +; GISEL-X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax +; GISEL-X86-NEXT: xorl %edx, %edx +; GISEL-X86-NEXT: cmpw %ax, %cx +; GISEL-X86-NEXT: seta %dl +; GISEL-X86-NEXT: andl $1, %edx +; GISEL-X86-NEXT: je .LBB1_2 +; GISEL-X86-NEXT: # %bb.1: +; GISEL-X86-NEXT: movl %ecx, %eax +; GISEL-X86-NEXT: .LBB1_2: +; GISEL-X86-NEXT: # kill: def $ax killed $ax killed $eax +; GISEL-X86-NEXT: retl %ret = call i16 @llvm.umax.i16(i16 %a, i16 %b) ret i16 %ret } @@ -99,12 +132,15 @@ define i32 @umax_i32(i32 %a, i32 %b) nounwind readnone { ; X64-NEXT: cmoval %edi, %eax ; X64-NEXT: retq ; -; FASTISEL-X64-LABEL: umax_i32: -; FASTISEL-X64: # %bb.0: -; FASTISEL-X64-NEXT: movl %esi, %eax -; FASTISEL-X64-NEXT: cmpl %esi, %edi -; FASTISEL-X64-NEXT: cmoval %edi, %eax -; FASTISEL-X64-NEXT: retq +; GISEL-X64-LABEL: umax_i32: +; GISEL-X64: # %bb.0: +; GISEL-X64-NEXT: movl %edi, %eax +; GISEL-X64-NEXT: xorl %ecx, %ecx +; GISEL-X64-NEXT: cmpl %esi, %edi +; GISEL-X64-NEXT: seta %cl +; GISEL-X64-NEXT: andl $1, %ecx +; GISEL-X64-NEXT: cmovel %esi, %eax +; GISEL-X64-NEXT: retq ; ; X86-LABEL: umax_i32: ; X86: # %bb.0: @@ -117,16 +153,19 @@ define i32 @umax_i32(i32 %a, i32 %b) nounwind readnone { ; X86-NEXT: .LBB2_2: ; X86-NEXT: retl ; -; FASTISEL-X86-LABEL: umax_i32: -; FASTISEL-X86: # %bb.0: -; FASTISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx -; FASTISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %eax -; FASTISEL-X86-NEXT: cmpl %ecx, %eax -; FASTISEL-X86-NEXT: ja .LBB2_2 -; FASTISEL-X86-NEXT: # %bb.1: -; FASTISEL-X86-NEXT: movl %ecx, %eax -; FASTISEL-X86-NEXT: .LBB2_2: -; FASTISEL-X86-NEXT: retl +; GISEL-X86-LABEL: umax_i32: +; GISEL-X86: # %bb.0: +; GISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx +; GISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; GISEL-X86-NEXT: xorl %edx, %edx +; GISEL-X86-NEXT: cmpl %eax, %ecx +; GISEL-X86-NEXT: seta %dl +; GISEL-X86-NEXT: andl $1, %edx +; GISEL-X86-NEXT: je .LBB2_2 +; GISEL-X86-NEXT: # %bb.1: +; GISEL-X86-NEXT: movl %ecx, %eax +; GISEL-X86-NEXT: .LBB2_2: +; GISEL-X86-NEXT: retl %ret = call i32 @llvm.umax.i32(i32 %a, i32 %b) ret i32 %ret } @@ -138,32 +177,35 @@ define i64 @umax_i64(i64 %a, i64 %b) nounwind readnone { ; X64-NEXT: cmovaq %rdi, %rax ; X64-NEXT: retq ; -; FASTISEL-X64-LABEL: umax_i64: -; FASTISEL-X64: # %bb.0: -; FASTISEL-X64-NEXT: movq %rsi, %rax -; FASTISEL-X64-NEXT: cmpq %rsi, %rdi -; 
FASTISEL-X64-NEXT: cmovaq %rdi, %rax -; FASTISEL-X64-NEXT: retq +; GISEL-X64-LABEL: umax_i64: +; GISEL-X64: # %bb.0: +; GISEL-X64-NEXT: movq %rdi, %rax +; GISEL-X64-NEXT: xorl %ecx, %ecx +; GISEL-X64-NEXT: cmpq %rsi, %rdi +; GISEL-X64-NEXT: seta %cl +; GISEL-X64-NEXT: andl $1, %ecx +; GISEL-X64-NEXT: cmoveq %rsi, %rax +; GISEL-X64-NEXT: retq ; -; X86-LABEL: umax_i64: -; X86: # %bb.0: -; X86-NEXT: pushl %edi -; X86-NEXT: pushl %esi -; X86-NEXT: movl {{[0-9]+}}(%esp), %eax -; X86-NEXT: movl {{[0-9]+}}(%esp), %edx -; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx -; X86-NEXT: movl {{[0-9]+}}(%esp), %esi -; X86-NEXT: cmpl %eax, %ecx -; X86-NEXT: movl %esi, %edi -; X86-NEXT: sbbl %edx, %edi -; X86-NEXT: jb .LBB3_2 -; X86-NEXT: # %bb.1: -; X86-NEXT: movl %ecx, %eax -; X86-NEXT: movl %esi, %edx -; X86-NEXT: .LBB3_2: -; X86-NEXT: popl %esi -; X86-NEXT: popl %edi -; X86-NEXT: retl +; DAG-X86-LABEL: umax_i64: +; DAG-X86: # %bb.0: +; DAG-X86-NEXT: pushl %edi +; DAG-X86-NEXT: pushl %esi +; DAG-X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; DAG-X86-NEXT: movl {{[0-9]+}}(%esp), %edx +; DAG-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx +; DAG-X86-NEXT: movl {{[0-9]+}}(%esp), %esi +; DAG-X86-NEXT: cmpl %eax, %ecx +; DAG-X86-NEXT: movl %esi, %edi +; DAG-X86-NEXT: sbbl %edx, %edi +; DAG-X86-NEXT: jb .LBB3_2 +; DAG-X86-NEXT: # %bb.1: +; DAG-X86-NEXT: movl %ecx, %eax +; DAG-X86-NEXT: movl %esi, %edx +; DAG-X86-NEXT: .LBB3_2: +; DAG-X86-NEXT: popl %esi +; DAG-X86-NEXT: popl %edi +; DAG-X86-NEXT: retl ; ; FASTISEL-X86-LABEL: umax_i64: ; FASTISEL-X86: # %bb.0: @@ -184,6 +226,44 @@ define i64 @umax_i64(i64 %a, i64 %b) nounwind readnone { ; FASTISEL-X86-NEXT: popl %esi ; FASTISEL-X86-NEXT: popl %edi ; FASTISEL-X86-NEXT: retl +; +; GISEL-X86-LABEL: umax_i64: +; GISEL-X86: # %bb.0: +; GISEL-X86-NEXT: pushl %ebp +; GISEL-X86-NEXT: pushl %ebx +; GISEL-X86-NEXT: pushl %edi +; GISEL-X86-NEXT: pushl %esi +; GISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %esi +; GISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %ebp +; GISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; GISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %edx +; GISEL-X86-NEXT: cmpl %eax, %esi +; GISEL-X86-NEXT: seta %bl +; GISEL-X86-NEXT: xorl %ecx, %ecx +; GISEL-X86-NEXT: cmpl %edx, %ebp +; GISEL-X86-NEXT: seta %bh +; GISEL-X86-NEXT: sete %cl +; GISEL-X86-NEXT: testl %ecx, %ecx +; GISEL-X86-NEXT: je .LBB3_2 +; GISEL-X86-NEXT: # %bb.1: +; GISEL-X86-NEXT: movb %bl, %bh +; GISEL-X86-NEXT: .LBB3_2: +; GISEL-X86-NEXT: movzbl %bh, %edi +; GISEL-X86-NEXT: andl $1, %edi +; GISEL-X86-NEXT: je .LBB3_4 +; GISEL-X86-NEXT: # %bb.3: +; GISEL-X86-NEXT: movl %esi, %eax +; GISEL-X86-NEXT: .LBB3_4: +; GISEL-X86-NEXT: testl %edi, %edi +; GISEL-X86-NEXT: je .LBB3_6 +; GISEL-X86-NEXT: # %bb.5: +; GISEL-X86-NEXT: movl %ebp, %edx +; GISEL-X86-NEXT: .LBB3_6: +; GISEL-X86-NEXT: popl %esi +; GISEL-X86-NEXT: popl %edi +; GISEL-X86-NEXT: popl %ebx +; GISEL-X86-NEXT: popl %ebp +; GISEL-X86-NEXT: retl %ret = call i64 @llvm.umax.i64(i64 %a, i64 %b) ret i64 %ret } diff --git a/llvm/test/CodeGen/X86/isel-umin.ll b/llvm/test/CodeGen/X86/isel-umin.ll index 53a0b27..1710b9f 100644 --- a/llvm/test/CodeGen/X86/isel-umin.ll +++ b/llvm/test/CodeGen/X86/isel-umin.ll @@ -1,19 +1,19 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 -; RUN: llc < %s -mtriple=x86_64-linux-gnu | FileCheck %s --check-prefixes=X64 -; RUN: llc < %s -mtriple=x86_64-linux-gnu -fast-isel | FileCheck %s --check-prefixes=FASTISEL-X64 -; RUN: llc < %s -mtriple=x86_64-linux-gnu -global-isel -global-isel-abort=2 | FileCheck %s 
--check-prefixes=X64 -; RUN: llc < %s -mtriple=i686-linux-gnu | FileCheck %s --check-prefixes=X86 -; RUN: llc < %s -mtriple=i686-linux-gnu -fast-isel | FileCheck %s --check-prefixes=FASTISEL-X86 -; RUN: llc < %s -mtriple=i686-linux-gnu -global-isel -global-isel-abort=2 | FileCheck %s --check-prefixes=X86 +; RUN: llc < %s -mtriple=x86_64-linux-gnu | FileCheck %s --check-prefixes=X64,DAG-X64 +; RUN: llc < %s -mtriple=x86_64-linux-gnu -fast-isel | FileCheck %s --check-prefixes=X64,FASTISEL-X64 +; RUN: llc < %s -mtriple=x86_64-linux-gnu -global-isel -global-isel-abort=1 | FileCheck %s --check-prefixes=GISEL-X64 +; RUN: llc < %s -mtriple=i686-linux-gnu | FileCheck %s --check-prefixes=X86,DAG-X86 +; RUN: llc < %s -mtriple=i686-linux-gnu -fast-isel | FileCheck %s --check-prefixes=X86,FASTISEL-X86 +; RUN: llc < %s -mtriple=i686-linux-gnu -global-isel -global-isel-abort=1 | FileCheck %s --check-prefixes=GISEL-X86 define i8 @umin_i8(i8 %a, i8 %b) nounwind readnone { -; X64-LABEL: umin_i8: -; X64: # %bb.0: -; X64-NEXT: movl %esi, %eax -; X64-NEXT: cmpb %al, %dil -; X64-NEXT: cmovbl %edi, %eax -; X64-NEXT: # kill: def $al killed $al killed $eax -; X64-NEXT: retq +; DAG-X64-LABEL: umin_i8: +; DAG-X64: # %bb.0: +; DAG-X64-NEXT: movl %esi, %eax +; DAG-X64-NEXT: cmpb %al, %dil +; DAG-X64-NEXT: cmovbl %edi, %eax +; DAG-X64-NEXT: # kill: def $al killed $al killed $eax +; DAG-X64-NEXT: retq ; ; FASTISEL-X64-LABEL: umin_i8: ; FASTISEL-X64: # %bb.0: @@ -24,6 +24,17 @@ define i8 @umin_i8(i8 %a, i8 %b) nounwind readnone { ; FASTISEL-X64-NEXT: # kill: def $al killed $al killed $eax ; FASTISEL-X64-NEXT: retq ; +; GISEL-X64-LABEL: umin_i8: +; GISEL-X64: # %bb.0: +; GISEL-X64-NEXT: movl %esi, %eax +; GISEL-X64-NEXT: xorl %ecx, %ecx +; GISEL-X64-NEXT: cmpb %al, %dil +; GISEL-X64-NEXT: setb %cl +; GISEL-X64-NEXT: andl $1, %ecx +; GISEL-X64-NEXT: cmovnew %di, %ax +; GISEL-X64-NEXT: # kill: def $al killed $al killed $eax +; GISEL-X64-NEXT: retq +; ; X86-LABEL: umin_i8: ; X86: # %bb.0: ; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx @@ -35,16 +46,20 @@ define i8 @umin_i8(i8 %a, i8 %b) nounwind readnone { ; X86-NEXT: .LBB0_2: ; X86-NEXT: retl ; -; FASTISEL-X86-LABEL: umin_i8: -; FASTISEL-X86: # %bb.0: -; FASTISEL-X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx -; FASTISEL-X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax -; FASTISEL-X86-NEXT: cmpb %cl, %al -; FASTISEL-X86-NEXT: jb .LBB0_2 -; FASTISEL-X86-NEXT: # %bb.1: -; FASTISEL-X86-NEXT: movl %ecx, %eax -; FASTISEL-X86-NEXT: .LBB0_2: -; FASTISEL-X86-NEXT: retl +; GISEL-X86-LABEL: umin_i8: +; GISEL-X86: # %bb.0: +; GISEL-X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx +; GISEL-X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax +; GISEL-X86-NEXT: xorl %edx, %edx +; GISEL-X86-NEXT: cmpb %al, %cl +; GISEL-X86-NEXT: setb %dl +; GISEL-X86-NEXT: andl $1, %edx +; GISEL-X86-NEXT: je .LBB0_2 +; GISEL-X86-NEXT: # %bb.1: +; GISEL-X86-NEXT: movl %ecx, %eax +; GISEL-X86-NEXT: .LBB0_2: +; GISEL-X86-NEXT: # kill: def $al killed $al killed $eax +; GISEL-X86-NEXT: retl %ret = call i8 @llvm.umin.i8(i8 %a, i8 %b) ret i8 %ret } @@ -57,25 +72,28 @@ define i16 @umin_i16(i16 %a, i16 %b) nounwind readnone { ; X64-NEXT: # kill: def $ax killed $ax killed $eax ; X64-NEXT: retq ; -; FASTISEL-X64-LABEL: umin_i16: -; FASTISEL-X64: # %bb.0: -; FASTISEL-X64-NEXT: movl %esi, %eax -; FASTISEL-X64-NEXT: cmpw %ax, %di -; FASTISEL-X64-NEXT: cmovbl %edi, %eax -; FASTISEL-X64-NEXT: # kill: def $ax killed $ax killed $eax -; FASTISEL-X64-NEXT: retq +; GISEL-X64-LABEL: umin_i16: +; GISEL-X64: # %bb.0: +; GISEL-X64-NEXT: movl %edi, %eax +; 
GISEL-X64-NEXT: xorl %ecx, %ecx +; GISEL-X64-NEXT: cmpw %si, %ax +; GISEL-X64-NEXT: setb %cl +; GISEL-X64-NEXT: andl $1, %ecx +; GISEL-X64-NEXT: cmovew %si, %ax +; GISEL-X64-NEXT: # kill: def $ax killed $ax killed $eax +; GISEL-X64-NEXT: retq ; -; X86-LABEL: umin_i16: -; X86: # %bb.0: -; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx -; X86-NEXT: movl {{[0-9]+}}(%esp), %eax -; X86-NEXT: cmpw %cx, %ax -; X86-NEXT: jb .LBB1_2 -; X86-NEXT: # %bb.1: -; X86-NEXT: movl %ecx, %eax -; X86-NEXT: .LBB1_2: -; X86-NEXT: # kill: def $ax killed $ax killed $eax -; X86-NEXT: retl +; DAG-X86-LABEL: umin_i16: +; DAG-X86: # %bb.0: +; DAG-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx +; DAG-X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; DAG-X86-NEXT: cmpw %cx, %ax +; DAG-X86-NEXT: jb .LBB1_2 +; DAG-X86-NEXT: # %bb.1: +; DAG-X86-NEXT: movl %ecx, %eax +; DAG-X86-NEXT: .LBB1_2: +; DAG-X86-NEXT: # kill: def $ax killed $ax killed $eax +; DAG-X86-NEXT: retl ; ; FASTISEL-X86-LABEL: umin_i16: ; FASTISEL-X86: # %bb.0: @@ -88,6 +106,21 @@ define i16 @umin_i16(i16 %a, i16 %b) nounwind readnone { ; FASTISEL-X86-NEXT: .LBB1_2: ; FASTISEL-X86-NEXT: # kill: def $ax killed $ax killed $eax ; FASTISEL-X86-NEXT: retl +; +; GISEL-X86-LABEL: umin_i16: +; GISEL-X86: # %bb.0: +; GISEL-X86-NEXT: movzwl {{[0-9]+}}(%esp), %ecx +; GISEL-X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax +; GISEL-X86-NEXT: xorl %edx, %edx +; GISEL-X86-NEXT: cmpw %ax, %cx +; GISEL-X86-NEXT: setb %dl +; GISEL-X86-NEXT: andl $1, %edx +; GISEL-X86-NEXT: je .LBB1_2 +; GISEL-X86-NEXT: # %bb.1: +; GISEL-X86-NEXT: movl %ecx, %eax +; GISEL-X86-NEXT: .LBB1_2: +; GISEL-X86-NEXT: # kill: def $ax killed $ax killed $eax +; GISEL-X86-NEXT: retl %ret = call i16 @llvm.umin.i16(i16 %a, i16 %b) ret i16 %ret } @@ -99,12 +132,15 @@ define i32 @umin_i32(i32 %a, i32 %b) nounwind readnone { ; X64-NEXT: cmovbl %edi, %eax ; X64-NEXT: retq ; -; FASTISEL-X64-LABEL: umin_i32: -; FASTISEL-X64: # %bb.0: -; FASTISEL-X64-NEXT: movl %esi, %eax -; FASTISEL-X64-NEXT: cmpl %esi, %edi -; FASTISEL-X64-NEXT: cmovbl %edi, %eax -; FASTISEL-X64-NEXT: retq +; GISEL-X64-LABEL: umin_i32: +; GISEL-X64: # %bb.0: +; GISEL-X64-NEXT: movl %edi, %eax +; GISEL-X64-NEXT: xorl %ecx, %ecx +; GISEL-X64-NEXT: cmpl %esi, %edi +; GISEL-X64-NEXT: setb %cl +; GISEL-X64-NEXT: andl $1, %ecx +; GISEL-X64-NEXT: cmovel %esi, %eax +; GISEL-X64-NEXT: retq ; ; X86-LABEL: umin_i32: ; X86: # %bb.0: @@ -117,16 +153,19 @@ define i32 @umin_i32(i32 %a, i32 %b) nounwind readnone { ; X86-NEXT: .LBB2_2: ; X86-NEXT: retl ; -; FASTISEL-X86-LABEL: umin_i32: -; FASTISEL-X86: # %bb.0: -; FASTISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx -; FASTISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %eax -; FASTISEL-X86-NEXT: cmpl %ecx, %eax -; FASTISEL-X86-NEXT: jb .LBB2_2 -; FASTISEL-X86-NEXT: # %bb.1: -; FASTISEL-X86-NEXT: movl %ecx, %eax -; FASTISEL-X86-NEXT: .LBB2_2: -; FASTISEL-X86-NEXT: retl +; GISEL-X86-LABEL: umin_i32: +; GISEL-X86: # %bb.0: +; GISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx +; GISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; GISEL-X86-NEXT: xorl %edx, %edx +; GISEL-X86-NEXT: cmpl %eax, %ecx +; GISEL-X86-NEXT: setb %dl +; GISEL-X86-NEXT: andl $1, %edx +; GISEL-X86-NEXT: je .LBB2_2 +; GISEL-X86-NEXT: # %bb.1: +; GISEL-X86-NEXT: movl %ecx, %eax +; GISEL-X86-NEXT: .LBB2_2: +; GISEL-X86-NEXT: retl %ret = call i32 @llvm.umin.i32(i32 %a, i32 %b) ret i32 %ret } @@ -138,32 +177,35 @@ define i64 @umin_i64(i64 %a, i64 %b) nounwind readnone { ; X64-NEXT: cmovbq %rdi, %rax ; X64-NEXT: retq ; -; FASTISEL-X64-LABEL: umin_i64: -; FASTISEL-X64: # %bb.0: -; FASTISEL-X64-NEXT: movq %rsi, 
%rax -; FASTISEL-X64-NEXT: cmpq %rsi, %rdi -; FASTISEL-X64-NEXT: cmovbq %rdi, %rax -; FASTISEL-X64-NEXT: retq +; GISEL-X64-LABEL: umin_i64: +; GISEL-X64: # %bb.0: +; GISEL-X64-NEXT: movq %rdi, %rax +; GISEL-X64-NEXT: xorl %ecx, %ecx +; GISEL-X64-NEXT: cmpq %rsi, %rdi +; GISEL-X64-NEXT: setb %cl +; GISEL-X64-NEXT: andl $1, %ecx +; GISEL-X64-NEXT: cmoveq %rsi, %rax +; GISEL-X64-NEXT: retq ; -; X86-LABEL: umin_i64: -; X86: # %bb.0: -; X86-NEXT: pushl %edi -; X86-NEXT: pushl %esi -; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx -; X86-NEXT: movl {{[0-9]+}}(%esp), %esi -; X86-NEXT: movl {{[0-9]+}}(%esp), %eax -; X86-NEXT: movl {{[0-9]+}}(%esp), %edx -; X86-NEXT: cmpl %ecx, %eax -; X86-NEXT: movl %edx, %edi -; X86-NEXT: sbbl %esi, %edi -; X86-NEXT: jb .LBB3_2 -; X86-NEXT: # %bb.1: -; X86-NEXT: movl %ecx, %eax -; X86-NEXT: movl %esi, %edx -; X86-NEXT: .LBB3_2: -; X86-NEXT: popl %esi -; X86-NEXT: popl %edi -; X86-NEXT: retl +; DAG-X86-LABEL: umin_i64: +; DAG-X86: # %bb.0: +; DAG-X86-NEXT: pushl %edi +; DAG-X86-NEXT: pushl %esi +; DAG-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx +; DAG-X86-NEXT: movl {{[0-9]+}}(%esp), %esi +; DAG-X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; DAG-X86-NEXT: movl {{[0-9]+}}(%esp), %edx +; DAG-X86-NEXT: cmpl %ecx, %eax +; DAG-X86-NEXT: movl %edx, %edi +; DAG-X86-NEXT: sbbl %esi, %edi +; DAG-X86-NEXT: jb .LBB3_2 +; DAG-X86-NEXT: # %bb.1: +; DAG-X86-NEXT: movl %ecx, %eax +; DAG-X86-NEXT: movl %esi, %edx +; DAG-X86-NEXT: .LBB3_2: +; DAG-X86-NEXT: popl %esi +; DAG-X86-NEXT: popl %edi +; DAG-X86-NEXT: retl ; ; FASTISEL-X86-LABEL: umin_i64: ; FASTISEL-X86: # %bb.0: @@ -184,6 +226,44 @@ define i64 @umin_i64(i64 %a, i64 %b) nounwind readnone { ; FASTISEL-X86-NEXT: popl %esi ; FASTISEL-X86-NEXT: popl %edi ; FASTISEL-X86-NEXT: retl +; +; GISEL-X86-LABEL: umin_i64: +; GISEL-X86: # %bb.0: +; GISEL-X86-NEXT: pushl %ebp +; GISEL-X86-NEXT: pushl %ebx +; GISEL-X86-NEXT: pushl %edi +; GISEL-X86-NEXT: pushl %esi +; GISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %esi +; GISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %ebp +; GISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; GISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %edx +; GISEL-X86-NEXT: cmpl %eax, %esi +; GISEL-X86-NEXT: setb %bl +; GISEL-X86-NEXT: xorl %ecx, %ecx +; GISEL-X86-NEXT: cmpl %edx, %ebp +; GISEL-X86-NEXT: setb %bh +; GISEL-X86-NEXT: sete %cl +; GISEL-X86-NEXT: testl %ecx, %ecx +; GISEL-X86-NEXT: je .LBB3_2 +; GISEL-X86-NEXT: # %bb.1: +; GISEL-X86-NEXT: movb %bl, %bh +; GISEL-X86-NEXT: .LBB3_2: +; GISEL-X86-NEXT: movzbl %bh, %edi +; GISEL-X86-NEXT: andl $1, %edi +; GISEL-X86-NEXT: je .LBB3_4 +; GISEL-X86-NEXT: # %bb.3: +; GISEL-X86-NEXT: movl %esi, %eax +; GISEL-X86-NEXT: .LBB3_4: +; GISEL-X86-NEXT: testl %edi, %edi +; GISEL-X86-NEXT: je .LBB3_6 +; GISEL-X86-NEXT: # %bb.5: +; GISEL-X86-NEXT: movl %ebp, %edx +; GISEL-X86-NEXT: .LBB3_6: +; GISEL-X86-NEXT: popl %esi +; GISEL-X86-NEXT: popl %edi +; GISEL-X86-NEXT: popl %ebx +; GISEL-X86-NEXT: popl %ebp +; GISEL-X86-NEXT: retl %ret = call i64 @llvm.umin.i64(i64 %a, i64 %b) ret i64 %ret } diff --git a/llvm/test/CodeGen/X86/logic-shift.ll b/llvm/test/CodeGen/X86/logic-shift.ll index 96e63d1..104151c 100644 --- a/llvm/test/CodeGen/X86/logic-shift.ll +++ b/llvm/test/CodeGen/X86/logic-shift.ll @@ -129,10 +129,10 @@ define <16 x i8> @or_ashr_commute3(<16 x i8> %x0, <16 x i8> %x1, <16 x i8> %y, < ; CHECK-NEXT: vpsraw $4, %xmm1, %xmm5 ; CHECK-NEXT: vpblendvb %xmm4, %xmm5, %xmm1, %xmm1 ; CHECK-NEXT: vpsraw $2, %xmm1, %xmm5 -; CHECK-NEXT: vpaddw %xmm4, %xmm4, %xmm4 -; CHECK-NEXT: vpblendvb %xmm4, %xmm5, %xmm1, %xmm1 +; CHECK-NEXT: 
vpaddw %xmm4, %xmm4, %xmm6 +; CHECK-NEXT: vpblendvb %xmm6, %xmm5, %xmm1, %xmm1 ; CHECK-NEXT: vpsraw $1, %xmm1, %xmm5 -; CHECK-NEXT: vpaddw %xmm4, %xmm4, %xmm4 +; CHECK-NEXT: vpsllw $2, %xmm4, %xmm4 ; CHECK-NEXT: vpblendvb %xmm4, %xmm5, %xmm1, %xmm1 ; CHECK-NEXT: vpsrlw $8, %xmm1, %xmm1 ; CHECK-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] @@ -140,10 +140,10 @@ define <16 x i8> @or_ashr_commute3(<16 x i8> %x0, <16 x i8> %x1, <16 x i8> %y, < ; CHECK-NEXT: vpsraw $4, %xmm0, %xmm4 ; CHECK-NEXT: vpblendvb %xmm2, %xmm4, %xmm0, %xmm0 ; CHECK-NEXT: vpsraw $2, %xmm0, %xmm4 -; CHECK-NEXT: vpaddw %xmm2, %xmm2, %xmm2 -; CHECK-NEXT: vpblendvb %xmm2, %xmm4, %xmm0, %xmm0 +; CHECK-NEXT: vpaddw %xmm2, %xmm2, %xmm5 +; CHECK-NEXT: vpblendvb %xmm5, %xmm4, %xmm0, %xmm0 ; CHECK-NEXT: vpsraw $1, %xmm0, %xmm4 -; CHECK-NEXT: vpaddw %xmm2, %xmm2, %xmm2 +; CHECK-NEXT: vpsllw $2, %xmm2, %xmm2 ; CHECK-NEXT: vpblendvb %xmm2, %xmm4, %xmm0, %xmm0 ; CHECK-NEXT: vpsrlw $8, %xmm0, %xmm0 ; CHECK-NEXT: vpackuswb %xmm1, %xmm0, %xmm0 @@ -413,10 +413,10 @@ define <16 x i8> @xor_ashr_commute3(<16 x i8> %x0, <16 x i8> %x1, <16 x i8> %y, ; CHECK-NEXT: vpsraw $4, %xmm1, %xmm5 ; CHECK-NEXT: vpblendvb %xmm4, %xmm5, %xmm1, %xmm1 ; CHECK-NEXT: vpsraw $2, %xmm1, %xmm5 -; CHECK-NEXT: vpaddw %xmm4, %xmm4, %xmm4 -; CHECK-NEXT: vpblendvb %xmm4, %xmm5, %xmm1, %xmm1 +; CHECK-NEXT: vpaddw %xmm4, %xmm4, %xmm6 +; CHECK-NEXT: vpblendvb %xmm6, %xmm5, %xmm1, %xmm1 ; CHECK-NEXT: vpsraw $1, %xmm1, %xmm5 -; CHECK-NEXT: vpaddw %xmm4, %xmm4, %xmm4 +; CHECK-NEXT: vpsllw $2, %xmm4, %xmm4 ; CHECK-NEXT: vpblendvb %xmm4, %xmm5, %xmm1, %xmm1 ; CHECK-NEXT: vpsrlw $8, %xmm1, %xmm1 ; CHECK-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] @@ -424,10 +424,10 @@ define <16 x i8> @xor_ashr_commute3(<16 x i8> %x0, <16 x i8> %x1, <16 x i8> %y, ; CHECK-NEXT: vpsraw $4, %xmm0, %xmm4 ; CHECK-NEXT: vpblendvb %xmm2, %xmm4, %xmm0, %xmm0 ; CHECK-NEXT: vpsraw $2, %xmm0, %xmm4 -; CHECK-NEXT: vpaddw %xmm2, %xmm2, %xmm2 -; CHECK-NEXT: vpblendvb %xmm2, %xmm4, %xmm0, %xmm0 +; CHECK-NEXT: vpaddw %xmm2, %xmm2, %xmm5 +; CHECK-NEXT: vpblendvb %xmm5, %xmm4, %xmm0, %xmm0 ; CHECK-NEXT: vpsraw $1, %xmm0, %xmm4 -; CHECK-NEXT: vpaddw %xmm2, %xmm2, %xmm2 +; CHECK-NEXT: vpsllw $2, %xmm2, %xmm2 ; CHECK-NEXT: vpblendvb %xmm2, %xmm4, %xmm0, %xmm0 ; CHECK-NEXT: vpsrlw $8, %xmm0, %xmm0 ; CHECK-NEXT: vpackuswb %xmm1, %xmm0, %xmm0 @@ -697,10 +697,10 @@ define <16 x i8> @and_ashr_commute3(<16 x i8> %x0, <16 x i8> %x1, <16 x i8> %y, ; CHECK-NEXT: vpsraw $4, %xmm1, %xmm5 ; CHECK-NEXT: vpblendvb %xmm4, %xmm5, %xmm1, %xmm1 ; CHECK-NEXT: vpsraw $2, %xmm1, %xmm5 -; CHECK-NEXT: vpaddw %xmm4, %xmm4, %xmm4 -; CHECK-NEXT: vpblendvb %xmm4, %xmm5, %xmm1, %xmm1 +; CHECK-NEXT: vpaddw %xmm4, %xmm4, %xmm6 +; CHECK-NEXT: vpblendvb %xmm6, %xmm5, %xmm1, %xmm1 ; CHECK-NEXT: vpsraw $1, %xmm1, %xmm5 -; CHECK-NEXT: vpaddw %xmm4, %xmm4, %xmm4 +; CHECK-NEXT: vpsllw $2, %xmm4, %xmm4 ; CHECK-NEXT: vpblendvb %xmm4, %xmm5, %xmm1, %xmm1 ; CHECK-NEXT: vpsrlw $8, %xmm1, %xmm1 ; CHECK-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] @@ -708,10 +708,10 @@ define <16 x i8> @and_ashr_commute3(<16 x i8> %x0, <16 x i8> %x1, <16 x i8> %y, ; CHECK-NEXT: vpsraw $4, %xmm0, %xmm4 ; CHECK-NEXT: vpblendvb %xmm2, %xmm4, %xmm0, %xmm0 ; CHECK-NEXT: vpsraw $2, %xmm0, %xmm4 -; CHECK-NEXT: vpaddw %xmm2, %xmm2, %xmm2 -; CHECK-NEXT: vpblendvb %xmm2, %xmm4, %xmm0, %xmm0 +; CHECK-NEXT: vpaddw %xmm2, %xmm2, %xmm5 +; CHECK-NEXT: vpblendvb %xmm5, %xmm4, %xmm0, %xmm0 ; CHECK-NEXT: vpsraw $1, 
%xmm0, %xmm4 -; CHECK-NEXT: vpaddw %xmm2, %xmm2, %xmm2 +; CHECK-NEXT: vpsllw $2, %xmm2, %xmm2 ; CHECK-NEXT: vpblendvb %xmm2, %xmm4, %xmm0, %xmm0 ; CHECK-NEXT: vpsrlw $8, %xmm0, %xmm0 ; CHECK-NEXT: vpackuswb %xmm1, %xmm0, %xmm0 diff --git a/llvm/test/CodeGen/X86/prefer-avx256-shift.ll b/llvm/test/CodeGen/X86/prefer-avx256-shift.ll index bf04c8d..63bbac12 100644 --- a/llvm/test/CodeGen/X86/prefer-avx256-shift.ll +++ b/llvm/test/CodeGen/X86/prefer-avx256-shift.ll @@ -302,10 +302,10 @@ define <32 x i8> @var_ashr_v32i8(<32 x i8> %a, <32 x i8> %b) { ; AVX256-NEXT: vpsraw $4, %ymm3, %ymm4 ; AVX256-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm3 ; AVX256-NEXT: vpsraw $2, %ymm3, %ymm4 -; AVX256-NEXT: vpaddw %ymm2, %ymm2, %ymm2 -; AVX256-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm3 +; AVX256-NEXT: vpaddw %ymm2, %ymm2, %ymm5 +; AVX256-NEXT: vpblendvb %ymm5, %ymm4, %ymm3, %ymm3 ; AVX256-NEXT: vpsraw $1, %ymm3, %ymm4 -; AVX256-NEXT: vpaddw %ymm2, %ymm2, %ymm2 +; AVX256-NEXT: vpsllw $2, %ymm2, %ymm2 ; AVX256-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm2 ; AVX256-NEXT: vpsrlw $8, %ymm2, %ymm2 ; AVX256-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23] @@ -313,10 +313,10 @@ define <32 x i8> @var_ashr_v32i8(<32 x i8> %a, <32 x i8> %b) { ; AVX256-NEXT: vpsraw $4, %ymm0, %ymm3 ; AVX256-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0 ; AVX256-NEXT: vpsraw $2, %ymm0, %ymm3 -; AVX256-NEXT: vpaddw %ymm1, %ymm1, %ymm1 -; AVX256-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0 +; AVX256-NEXT: vpaddw %ymm1, %ymm1, %ymm4 +; AVX256-NEXT: vpblendvb %ymm4, %ymm3, %ymm0, %ymm0 ; AVX256-NEXT: vpsraw $1, %ymm0, %ymm3 -; AVX256-NEXT: vpaddw %ymm1, %ymm1, %ymm1 +; AVX256-NEXT: vpsllw $2, %ymm1, %ymm1 ; AVX256-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0 ; AVX256-NEXT: vpsrlw $8, %ymm0, %ymm0 ; AVX256-NEXT: vpackuswb %ymm2, %ymm0, %ymm0 @@ -338,10 +338,10 @@ define <32 x i8> @var_ashr_v32i8(<32 x i8> %a, <32 x i8> %b) { ; AVX512VL-NEXT: vpsraw $4, %ymm3, %ymm4 ; AVX512VL-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm3 ; AVX512VL-NEXT: vpsraw $2, %ymm3, %ymm4 -; AVX512VL-NEXT: vpaddw %ymm2, %ymm2, %ymm2 -; AVX512VL-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm3 +; AVX512VL-NEXT: vpaddw %ymm2, %ymm2, %ymm5 +; AVX512VL-NEXT: vpblendvb %ymm5, %ymm4, %ymm3, %ymm3 ; AVX512VL-NEXT: vpsraw $1, %ymm3, %ymm4 -; AVX512VL-NEXT: vpaddw %ymm2, %ymm2, %ymm2 +; AVX512VL-NEXT: vpsllw $2, %ymm2, %ymm2 ; AVX512VL-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm2 ; AVX512VL-NEXT: vpsrlw $8, %ymm2, %ymm2 ; AVX512VL-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23] @@ -349,10 +349,10 @@ define <32 x i8> @var_ashr_v32i8(<32 x i8> %a, <32 x i8> %b) { ; AVX512VL-NEXT: vpsraw $4, %ymm0, %ymm3 ; AVX512VL-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0 ; AVX512VL-NEXT: vpsraw $2, %ymm0, %ymm3 -; AVX512VL-NEXT: vpaddw %ymm1, %ymm1, %ymm1 -; AVX512VL-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0 +; AVX512VL-NEXT: vpaddw %ymm1, %ymm1, %ymm4 +; AVX512VL-NEXT: vpblendvb %ymm4, %ymm3, %ymm0, %ymm0 ; AVX512VL-NEXT: vpsraw $1, %ymm0, %ymm3 -; AVX512VL-NEXT: vpaddw %ymm1, %ymm1, %ymm1 +; AVX512VL-NEXT: vpsllw $2, %ymm1, %ymm1 ; AVX512VL-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0 ; AVX512VL-NEXT: vpsrlw $8, %ymm0, %ymm0 ; AVX512VL-NEXT: vpackuswb %ymm2, %ymm0, %ymm0 @@ -432,10 +432,10 @@ define <16 x i8> @var_ashr_v16i8(<16 x i8> %a, <16 x i8> %b) { ; AVX256VL-NEXT: vpsraw $4, %xmm3, %xmm4 ; AVX256VL-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm3 ; AVX256VL-NEXT: vpsraw $2, 
%xmm3, %xmm4 -; AVX256VL-NEXT: vpaddw %xmm2, %xmm2, %xmm2 -; AVX256VL-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm3 +; AVX256VL-NEXT: vpaddw %xmm2, %xmm2, %xmm5 +; AVX256VL-NEXT: vpblendvb %xmm5, %xmm4, %xmm3, %xmm3 ; AVX256VL-NEXT: vpsraw $1, %xmm3, %xmm4 -; AVX256VL-NEXT: vpaddw %xmm2, %xmm2, %xmm2 +; AVX256VL-NEXT: vpsllw $2, %xmm2, %xmm2 ; AVX256VL-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm2 ; AVX256VL-NEXT: vpsrlw $8, %xmm2, %xmm2 ; AVX256VL-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] @@ -443,10 +443,10 @@ define <16 x i8> @var_ashr_v16i8(<16 x i8> %a, <16 x i8> %b) { ; AVX256VL-NEXT: vpsraw $4, %xmm0, %xmm3 ; AVX256VL-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0 ; AVX256VL-NEXT: vpsraw $2, %xmm0, %xmm3 -; AVX256VL-NEXT: vpaddw %xmm1, %xmm1, %xmm1 -; AVX256VL-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0 +; AVX256VL-NEXT: vpaddw %xmm1, %xmm1, %xmm4 +; AVX256VL-NEXT: vpblendvb %xmm4, %xmm3, %xmm0, %xmm0 ; AVX256VL-NEXT: vpsraw $1, %xmm0, %xmm3 -; AVX256VL-NEXT: vpaddw %xmm1, %xmm1, %xmm1 +; AVX256VL-NEXT: vpsllw $2, %xmm1, %xmm1 ; AVX256VL-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0 ; AVX256VL-NEXT: vpsrlw $8, %xmm0, %xmm0 ; AVX256VL-NEXT: vpackuswb %xmm2, %xmm0, %xmm0 diff --git a/llvm/test/CodeGen/X86/shuffle-as-shifts.ll b/llvm/test/CodeGen/X86/shuffle-as-shifts.ll index 9c8729b3..4b8f78d 100644 --- a/llvm/test/CodeGen/X86/shuffle-as-shifts.ll +++ b/llvm/test/CodeGen/X86/shuffle-as-shifts.ll @@ -15,20 +15,20 @@ define <4 x i32> @shuf_rot_v4i32_1032(<4 x i32> %x) { ; ; CHECK-ICX-LABEL: shuf_rot_v4i32_1032: ; CHECK-ICX: # %bb.0: -; CHECK-ICX-NEXT: vpaddd %xmm0, %xmm0, %xmm0 ; CHECK-ICX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,0,3,2] +; CHECK-ICX-NEXT: vpaddd %xmm0, %xmm0, %xmm0 ; CHECK-ICX-NEXT: retq ; ; CHECK-V4-LABEL: shuf_rot_v4i32_1032: ; CHECK-V4: # %bb.0: -; CHECK-V4-NEXT: vpaddd %xmm0, %xmm0, %xmm0 ; CHECK-V4-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,0,3,2] +; CHECK-V4-NEXT: vpaddd %xmm0, %xmm0, %xmm0 ; CHECK-V4-NEXT: retq ; ; CHECK-ZNVER4-LABEL: shuf_rot_v4i32_1032: ; CHECK-ZNVER4: # %bb.0: -; CHECK-ZNVER4-NEXT: vpaddd %xmm0, %xmm0, %xmm0 ; CHECK-ZNVER4-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,0,3,2] +; CHECK-ZNVER4-NEXT: vpaddd %xmm0, %xmm0, %xmm0 ; CHECK-ZNVER4-NEXT: retq %x1 = add <4 x i32> %x, %x %r = shufflevector <4 x i32> %x1, <4 x i32> zeroinitializer, <4 x i32> <i32 1, i32 0, i32 3, i32 2> @@ -44,20 +44,20 @@ define <8 x i32> @shuf_rot_v8i32_10325476(<8 x i32> %x) { ; ; CHECK-ICX-LABEL: shuf_rot_v8i32_10325476: ; CHECK-ICX: # %bb.0: -; CHECK-ICX-NEXT: vpaddd %ymm0, %ymm0, %ymm0 ; CHECK-ICX-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[1,0,3,2,5,4,7,6] +; CHECK-ICX-NEXT: vpaddd %ymm0, %ymm0, %ymm0 ; CHECK-ICX-NEXT: retq ; ; CHECK-V4-LABEL: shuf_rot_v8i32_10325476: ; CHECK-V4: # %bb.0: -; CHECK-V4-NEXT: vpaddd %ymm0, %ymm0, %ymm0 ; CHECK-V4-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[1,0,3,2,5,4,7,6] +; CHECK-V4-NEXT: vpaddd %ymm0, %ymm0, %ymm0 ; CHECK-V4-NEXT: retq ; ; CHECK-ZNVER4-LABEL: shuf_rot_v8i32_10325476: ; CHECK-ZNVER4: # %bb.0: -; CHECK-ZNVER4-NEXT: vpaddd %ymm0, %ymm0, %ymm0 ; CHECK-ZNVER4-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[1,0,3,2,5,4,7,6] +; CHECK-ZNVER4-NEXT: vpaddd %ymm0, %ymm0, %ymm0 ; CHECK-ZNVER4-NEXT: retq %x1 = add <8 x i32> %x, %x %r = shufflevector <8 x i32> %x1, <8 x i32> zeroinitializer, <8 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6> @@ -73,20 +73,20 @@ define <16 x i32> @shuf_rot_v16i32_1032547698111013121514(<16 x i32> %x) { ; ; CHECK-ICX-LABEL: shuf_rot_v16i32_1032547698111013121514: ; CHECK-ICX: # %bb.0: -; CHECK-ICX-NEXT: vpaddd %zmm0, 
%zmm0, %zmm0 ; CHECK-ICX-NEXT: vpshufd {{.*#+}} zmm0 = zmm0[1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14] +; CHECK-ICX-NEXT: vpaddd %zmm0, %zmm0, %zmm0 ; CHECK-ICX-NEXT: retq ; ; CHECK-V4-LABEL: shuf_rot_v16i32_1032547698111013121514: ; CHECK-V4: # %bb.0: -; CHECK-V4-NEXT: vpaddd %zmm0, %zmm0, %zmm0 ; CHECK-V4-NEXT: vpshufd {{.*#+}} zmm0 = zmm0[1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14] +; CHECK-V4-NEXT: vpaddd %zmm0, %zmm0, %zmm0 ; CHECK-V4-NEXT: retq ; ; CHECK-ZNVER4-LABEL: shuf_rot_v16i32_1032547698111013121514: ; CHECK-ZNVER4: # %bb.0: -; CHECK-ZNVER4-NEXT: vpaddd %zmm0, %zmm0, %zmm0 ; CHECK-ZNVER4-NEXT: vpshufd {{.*#+}} zmm0 = zmm0[1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14] +; CHECK-ZNVER4-NEXT: vpaddd %zmm0, %zmm0, %zmm0 ; CHECK-ZNVER4-NEXT: retq %x1 = add <16 x i32> %x, %x %r = shufflevector <16 x i32> %x1, <16 x i32> zeroinitializer, <16 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6, i32 9, i32 8, i32 11, i32 10, i32 13, i32 12, i32 15, i32 14> @@ -168,20 +168,20 @@ define <4 x i32> @shuf_shr_v4i32_1U3U(<4 x i32> %x) { ; ; CHECK-ICX-LABEL: shuf_shr_v4i32_1U3U: ; CHECK-ICX: # %bb.0: -; CHECK-ICX-NEXT: vpaddd %xmm0, %xmm0, %xmm0 ; CHECK-ICX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3] +; CHECK-ICX-NEXT: vpaddd %xmm0, %xmm0, %xmm0 ; CHECK-ICX-NEXT: retq ; ; CHECK-V4-LABEL: shuf_shr_v4i32_1U3U: ; CHECK-V4: # %bb.0: -; CHECK-V4-NEXT: vpaddd %xmm0, %xmm0, %xmm0 ; CHECK-V4-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3] +; CHECK-V4-NEXT: vpaddd %xmm0, %xmm0, %xmm0 ; CHECK-V4-NEXT: retq ; ; CHECK-ZNVER4-LABEL: shuf_shr_v4i32_1U3U: ; CHECK-ZNVER4: # %bb.0: -; CHECK-ZNVER4-NEXT: vpaddd %xmm0, %xmm0, %xmm0 ; CHECK-ZNVER4-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3] +; CHECK-ZNVER4-NEXT: vpaddd %xmm0, %xmm0, %xmm0 ; CHECK-ZNVER4-NEXT: retq %x1 = add <4 x i32> %x, %x %r = shufflevector <4 x i32> %x1, <4 x i32> zeroinitializer, <4 x i32> <i32 1, i32 undef, i32 3, i32 undef> @@ -197,20 +197,20 @@ define <8 x i32> @shuf_shr_v8i32_1U3U5U7U(<8 x i32> %x) { ; ; CHECK-ICX-LABEL: shuf_shr_v8i32_1U3U5U7U: ; CHECK-ICX: # %bb.0: -; CHECK-ICX-NEXT: vpaddd %ymm0, %ymm0, %ymm0 ; CHECK-ICX-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[1,1,3,3,5,5,7,7] +; CHECK-ICX-NEXT: vpaddd %ymm0, %ymm0, %ymm0 ; CHECK-ICX-NEXT: retq ; ; CHECK-V4-LABEL: shuf_shr_v8i32_1U3U5U7U: ; CHECK-V4: # %bb.0: -; CHECK-V4-NEXT: vpaddd %ymm0, %ymm0, %ymm0 ; CHECK-V4-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[1,1,3,3,5,5,7,7] +; CHECK-V4-NEXT: vpaddd %ymm0, %ymm0, %ymm0 ; CHECK-V4-NEXT: retq ; ; CHECK-ZNVER4-LABEL: shuf_shr_v8i32_1U3U5U7U: ; CHECK-ZNVER4: # %bb.0: -; CHECK-ZNVER4-NEXT: vpaddd %ymm0, %ymm0, %ymm0 ; CHECK-ZNVER4-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[1,1,3,3,5,5,7,7] +; CHECK-ZNVER4-NEXT: vpaddd %ymm0, %ymm0, %ymm0 ; CHECK-ZNVER4-NEXT: retq %x1 = add <8 x i32> %x, %x %r = shufflevector <8 x i32> %x1, <8 x i32> zeroinitializer, <8 x i32> <i32 1, i32 undef, i32 3, i32 undef, i32 5, i32 undef, i32 7, i32 undef> @@ -226,20 +226,20 @@ define <16 x i32> @shuf_shr_v16i32_U3U5U7U9U11U13U15(<16 x i32> %x) { ; ; CHECK-ICX-LABEL: shuf_shr_v16i32_U3U5U7U9U11U13U15: ; CHECK-ICX: # %bb.0: -; CHECK-ICX-NEXT: vpaddd %zmm0, %zmm0, %zmm0 ; CHECK-ICX-NEXT: vpshufd {{.*#+}} zmm0 = zmm0[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15] +; CHECK-ICX-NEXT: vpaddd %zmm0, %zmm0, %zmm0 ; CHECK-ICX-NEXT: retq ; ; CHECK-V4-LABEL: shuf_shr_v16i32_U3U5U7U9U11U13U15: ; CHECK-V4: # %bb.0: -; CHECK-V4-NEXT: vpaddd %zmm0, %zmm0, %zmm0 ; CHECK-V4-NEXT: vpshufd {{.*#+}} zmm0 = zmm0[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15] +; CHECK-V4-NEXT: vpaddd %zmm0, %zmm0, %zmm0 ; CHECK-V4-NEXT: retq ; ; 
CHECK-ZNVER4-LABEL: shuf_shr_v16i32_U3U5U7U9U11U13U15: ; CHECK-ZNVER4: # %bb.0: -; CHECK-ZNVER4-NEXT: vpaddd %zmm0, %zmm0, %zmm0 ; CHECK-ZNVER4-NEXT: vpshufd {{.*#+}} zmm0 = zmm0[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15] +; CHECK-ZNVER4-NEXT: vpaddd %zmm0, %zmm0, %zmm0 ; CHECK-ZNVER4-NEXT: retq %x1 = add <16 x i32> %x, %x %r = shufflevector <16 x i32> %x1, <16 x i32> zeroinitializer, <16 x i32> <i32 1, i32 undef, i32 3, i32 undef, i32 5, i32 undef, i32 7, i32 undef, i32 9, i32 undef, i32 11, i32 undef, i32 13, i32 undef, i32 15, i32 undef> @@ -288,20 +288,20 @@ define <4 x i32> @shuf_shl_v4i32_U0U2(<4 x i32> %x) { ; ; CHECK-ICX-LABEL: shuf_shl_v4i32_U0U2: ; CHECK-ICX: # %bb.0: -; CHECK-ICX-NEXT: vpaddd %xmm0, %xmm0, %xmm0 ; CHECK-ICX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,2,2] +; CHECK-ICX-NEXT: vpaddd %xmm0, %xmm0, %xmm0 ; CHECK-ICX-NEXT: retq ; ; CHECK-V4-LABEL: shuf_shl_v4i32_U0U2: ; CHECK-V4: # %bb.0: -; CHECK-V4-NEXT: vpaddd %xmm0, %xmm0, %xmm0 ; CHECK-V4-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,2,2] +; CHECK-V4-NEXT: vpaddd %xmm0, %xmm0, %xmm0 ; CHECK-V4-NEXT: retq ; ; CHECK-ZNVER4-LABEL: shuf_shl_v4i32_U0U2: ; CHECK-ZNVER4: # %bb.0: -; CHECK-ZNVER4-NEXT: vpaddd %xmm0, %xmm0, %xmm0 ; CHECK-ZNVER4-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,2,2] +; CHECK-ZNVER4-NEXT: vpaddd %xmm0, %xmm0, %xmm0 ; CHECK-ZNVER4-NEXT: retq %x1 = add <4 x i32> %x, %x %r = shufflevector <4 x i32> %x1, <4 x i32> zeroinitializer, <4 x i32> <i32 undef, i32 0, i32 undef, i32 2> @@ -317,20 +317,20 @@ define <8 x i32> @shuf_shl_v8i32_U0U2U4U6(<8 x i32> %x) { ; ; CHECK-ICX-LABEL: shuf_shl_v8i32_U0U2U4U6: ; CHECK-ICX: # %bb.0: -; CHECK-ICX-NEXT: vpaddd %ymm0, %ymm0, %ymm0 ; CHECK-ICX-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,0,2,2,4,4,6,6] +; CHECK-ICX-NEXT: vpaddd %ymm0, %ymm0, %ymm0 ; CHECK-ICX-NEXT: retq ; ; CHECK-V4-LABEL: shuf_shl_v8i32_U0U2U4U6: ; CHECK-V4: # %bb.0: -; CHECK-V4-NEXT: vpaddd %ymm0, %ymm0, %ymm0 ; CHECK-V4-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,0,2,2,4,4,6,6] +; CHECK-V4-NEXT: vpaddd %ymm0, %ymm0, %ymm0 ; CHECK-V4-NEXT: retq ; ; CHECK-ZNVER4-LABEL: shuf_shl_v8i32_U0U2U4U6: ; CHECK-ZNVER4: # %bb.0: -; CHECK-ZNVER4-NEXT: vpaddd %ymm0, %ymm0, %ymm0 ; CHECK-ZNVER4-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,0,2,2,4,4,6,6] +; CHECK-ZNVER4-NEXT: vpaddd %ymm0, %ymm0, %ymm0 ; CHECK-ZNVER4-NEXT: retq %x1 = add <8 x i32> %x, %x %r = shufflevector <8 x i32> %x1, <8 x i32> zeroinitializer, <8 x i32> <i32 undef, i32 0, i32 undef, i32 2, i32 undef, i32 4, i32 undef, i32 6> @@ -346,20 +346,20 @@ define <16 x i32> @shuf_shl_v16i32_U0U2U4U6U8U10U12U14(<16 x i32> %x) { ; ; CHECK-ICX-LABEL: shuf_shl_v16i32_U0U2U4U6U8U10U12U14: ; CHECK-ICX: # %bb.0: -; CHECK-ICX-NEXT: vpaddd %zmm0, %zmm0, %zmm0 ; CHECK-ICX-NEXT: vpshufd {{.*#+}} zmm0 = zmm0[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14] +; CHECK-ICX-NEXT: vpaddd %zmm0, %zmm0, %zmm0 ; CHECK-ICX-NEXT: retq ; ; CHECK-V4-LABEL: shuf_shl_v16i32_U0U2U4U6U8U10U12U14: ; CHECK-V4: # %bb.0: -; CHECK-V4-NEXT: vpaddd %zmm0, %zmm0, %zmm0 ; CHECK-V4-NEXT: vpshufd {{.*#+}} zmm0 = zmm0[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14] +; CHECK-V4-NEXT: vpaddd %zmm0, %zmm0, %zmm0 ; CHECK-V4-NEXT: retq ; ; CHECK-ZNVER4-LABEL: shuf_shl_v16i32_U0U2U4U6U8U10U12U14: ; CHECK-ZNVER4: # %bb.0: -; CHECK-ZNVER4-NEXT: vpaddd %zmm0, %zmm0, %zmm0 ; CHECK-ZNVER4-NEXT: vpshufd {{.*#+}} zmm0 = zmm0[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14] +; CHECK-ZNVER4-NEXT: vpaddd %zmm0, %zmm0, %zmm0 ; CHECK-ZNVER4-NEXT: retq %x1 = add <16 x i32> %x, %x %r = shufflevector <16 x i32> %x1, <16 x i32> zeroinitializer, <16 x i32> <i32 undef, i32 0, i32 
undef, i32 2, i32 undef, i32 4, i32 undef, i32 6, i32 undef, i32 8, i32 undef, i32 10, i32 undef, i32 12, i32 undef, i32 14> diff --git a/llvm/test/CodeGen/X86/shuffle-of-splat-multiuses.ll b/llvm/test/CodeGen/X86/shuffle-of-splat-multiuses.ll index ecd9435..1766b4d 100644 --- a/llvm/test/CodeGen/X86/shuffle-of-splat-multiuses.ll +++ b/llvm/test/CodeGen/X86/shuffle-of-splat-multiuses.ll @@ -58,7 +58,7 @@ define <8 x float> @foo8(<8 x float> %v, ptr%p) nounwind { define <4 x i32> @undef_splatmask(<4 x i32> %v) nounwind { ; AVX2-LABEL: undef_splatmask: ; AVX2: # %bb.0: -; AVX2-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,2,3,3] +; AVX2-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,2,2,2] ; AVX2-NEXT: retq %res = shufflevector <4 x i32> %v, <4 x i32> undef, <4 x i32> <i32 2, i32 undef, i32 2, i32 undef> %res1 = shufflevector <4 x i32> %res, <4 x i32> undef, <4 x i32> <i32 0, i32 2, i32 undef, i32 undef> @@ -68,7 +68,7 @@ define <4 x i32> @undef_splatmask(<4 x i32> %v) nounwind { define <4 x i32> @undef_splatmask2(<4 x i32> %v) nounwind { ; AVX2-LABEL: undef_splatmask2: ; AVX2: # %bb.0: -; AVX2-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,2,3,3] +; AVX2-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,2,2,2] ; AVX2-NEXT: retq %res = shufflevector <4 x i32> %v, <4 x i32> undef, <4 x i32> <i32 2, i32 1, i32 2, i32 undef> %res1 = shufflevector <4 x i32> %res, <4 x i32> undef, <4 x i32> <i32 0, i32 2, i32 undef, i32 undef> @@ -78,7 +78,7 @@ define <4 x i32> @undef_splatmask2(<4 x i32> %v) nounwind { define <4 x i32> @undef_splatmask3(<4 x i32> %v) nounwind { ; AVX2-LABEL: undef_splatmask3: ; AVX2: # %bb.0: -; AVX2-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,2,3,3] +; AVX2-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,2,2,2] ; AVX2-NEXT: retq %res = shufflevector <4 x i32> %v, <4 x i32> undef, <4 x i32> <i32 2, i32 undef, i32 2, i32 undef> %res1 = shufflevector <4 x i32> %res, <4 x i32> undef, <4 x i32> <i32 0, i32 2, i32 undef, i32 3> @@ -88,7 +88,7 @@ define <4 x i32> @undef_splatmask3(<4 x i32> %v) nounwind { define <4 x i32> @undef_splatmask4(<4 x i32> %v, ptr %p) nounwind { ; AVX2-LABEL: undef_splatmask4: ; AVX2: # %bb.0: -; AVX2-NEXT: vshufps {{.*#+}} xmm1 = xmm0[2,2,3,3] +; AVX2-NEXT: vshufps {{.*#+}} xmm1 = xmm0[2,2,2,2] ; AVX2-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,3,2,3] ; AVX2-NEXT: vmovaps %xmm0, (%rdi) ; AVX2-NEXT: vmovaps %xmm1, %xmm0 diff --git a/llvm/test/CodeGen/X86/sshl_sat_vec.ll b/llvm/test/CodeGen/X86/sshl_sat_vec.ll index f91758b..10dee14 100644 --- a/llvm/test/CodeGen/X86/sshl_sat_vec.ll +++ b/llvm/test/CodeGen/X86/sshl_sat_vec.ll @@ -602,10 +602,10 @@ define <16 x i8> @vec_v16i8(<16 x i8> %x, <16 x i8> %y) nounwind { ; X64-AVX2-NEXT: vpunpckhbw {{.*#+}} xmm5 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] ; X64-AVX2-NEXT: vpblendvb %xmm5, %xmm4, %xmm3, %xmm3 ; X64-AVX2-NEXT: vpsraw $2, %xmm3, %xmm4 -; X64-AVX2-NEXT: vpaddw %xmm5, %xmm5, %xmm5 -; X64-AVX2-NEXT: vpblendvb %xmm5, %xmm4, %xmm3, %xmm3 +; X64-AVX2-NEXT: vpaddw %xmm5, %xmm5, %xmm6 +; X64-AVX2-NEXT: vpblendvb %xmm6, %xmm4, %xmm3, %xmm3 ; X64-AVX2-NEXT: vpsraw $1, %xmm3, %xmm4 -; X64-AVX2-NEXT: vpaddw %xmm5, %xmm5, %xmm5 +; X64-AVX2-NEXT: vpsllw $2, %xmm5, %xmm5 ; X64-AVX2-NEXT: vpblendvb %xmm5, %xmm4, %xmm3, %xmm3 ; X64-AVX2-NEXT: vpsrlw $8, %xmm3, %xmm3 ; X64-AVX2-NEXT: vpunpcklbw {{.*#+}} xmm4 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] @@ -613,10 +613,10 @@ define <16 x i8> @vec_v16i8(<16 x i8> %x, <16 x i8> %y) nounwind { ; X64-AVX2-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] ; X64-AVX2-NEXT: vpblendvb %xmm1, %xmm5, %xmm4, %xmm4 ; 
X64-AVX2-NEXT: vpsraw $2, %xmm4, %xmm5 -; X64-AVX2-NEXT: vpaddw %xmm1, %xmm1, %xmm1 -; X64-AVX2-NEXT: vpblendvb %xmm1, %xmm5, %xmm4, %xmm4 +; X64-AVX2-NEXT: vpaddw %xmm1, %xmm1, %xmm6 +; X64-AVX2-NEXT: vpblendvb %xmm6, %xmm5, %xmm4, %xmm4 ; X64-AVX2-NEXT: vpsraw $1, %xmm4, %xmm5 -; X64-AVX2-NEXT: vpaddw %xmm1, %xmm1, %xmm1 +; X64-AVX2-NEXT: vpsllw $2, %xmm1, %xmm1 ; X64-AVX2-NEXT: vpblendvb %xmm1, %xmm5, %xmm4, %xmm1 ; X64-AVX2-NEXT: vpsrlw $8, %xmm1, %xmm1 ; X64-AVX2-NEXT: vpackuswb %xmm3, %xmm1, %xmm1 diff --git a/llvm/test/CodeGen/X86/vec-strict-cmp-128.ll b/llvm/test/CodeGen/X86/vec-strict-cmp-128.ll index 209d6a5..93a692c 100644 --- a/llvm/test/CodeGen/X86/vec-strict-cmp-128.ll +++ b/llvm/test/CodeGen/X86/vec-strict-cmp-128.ll @@ -1911,13 +1911,13 @@ define <2 x i64> @test_v2f64_ogt_q(<2 x i64> %a, <2 x i64> %b, <2 x double> %f1, ; SSE-32-NEXT: movl $0, %edx ; SSE-32-NEXT: cmoval %ecx, %edx ; SSE-32-NEXT: movd %edx, %xmm3 -; SSE-32-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,0,1,1] +; SSE-32-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,0,0,0] ; SSE-32-NEXT: unpckhpd {{.*#+}} xmm4 = xmm4[1,1] ; SSE-32-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1,1] ; SSE-32-NEXT: ucomisd %xmm4, %xmm2 ; SSE-32-NEXT: cmoval %ecx, %eax ; SSE-32-NEXT: movd %eax, %xmm2 -; SSE-32-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,0,1,1] +; SSE-32-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,0,0,0] ; SSE-32-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm2[0] ; SSE-32-NEXT: pand %xmm3, %xmm0 ; SSE-32-NEXT: pandn %xmm1, %xmm3 @@ -2031,13 +2031,13 @@ define <2 x i64> @test_v2f64_oge_q(<2 x i64> %a, <2 x i64> %b, <2 x double> %f1, ; SSE-32-NEXT: movl $0, %edx ; SSE-32-NEXT: cmovael %ecx, %edx ; SSE-32-NEXT: movd %edx, %xmm3 -; SSE-32-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,0,1,1] +; SSE-32-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,0,0,0] ; SSE-32-NEXT: unpckhpd {{.*#+}} xmm4 = xmm4[1,1] ; SSE-32-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1,1] ; SSE-32-NEXT: ucomisd %xmm4, %xmm2 ; SSE-32-NEXT: cmovael %ecx, %eax ; SSE-32-NEXT: movd %eax, %xmm2 -; SSE-32-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,0,1,1] +; SSE-32-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,0,0,0] ; SSE-32-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm2[0] ; SSE-32-NEXT: pand %xmm3, %xmm0 ; SSE-32-NEXT: pandn %xmm1, %xmm3 @@ -2151,13 +2151,13 @@ define <2 x i64> @test_v2f64_olt_q(<2 x i64> %a, <2 x i64> %b, <2 x double> %f1, ; SSE-32-NEXT: movl $0, %edx ; SSE-32-NEXT: cmoval %ecx, %edx ; SSE-32-NEXT: movd %edx, %xmm3 -; SSE-32-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,0,1,1] +; SSE-32-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,0,0,0] ; SSE-32-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1,1] ; SSE-32-NEXT: unpckhpd {{.*#+}} xmm4 = xmm4[1,1] ; SSE-32-NEXT: ucomisd %xmm2, %xmm4 ; SSE-32-NEXT: cmoval %ecx, %eax ; SSE-32-NEXT: movd %eax, %xmm2 -; SSE-32-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,0,1,1] +; SSE-32-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,0,0,0] ; SSE-32-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm2[0] ; SSE-32-NEXT: pand %xmm3, %xmm0 ; SSE-32-NEXT: pandn %xmm1, %xmm3 @@ -2269,13 +2269,13 @@ define <2 x i64> @test_v2f64_ole_q(<2 x i64> %a, <2 x i64> %b, <2 x double> %f1, ; SSE-32-NEXT: movl $0, %edx ; SSE-32-NEXT: cmovael %ecx, %edx ; SSE-32-NEXT: movd %edx, %xmm3 -; SSE-32-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,0,1,1] +; SSE-32-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,0,0,0] ; SSE-32-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1,1] ; SSE-32-NEXT: unpckhpd {{.*#+}} xmm4 = xmm4[1,1] ; SSE-32-NEXT: ucomisd %xmm2, %xmm4 ; SSE-32-NEXT: cmovael %ecx, %eax ; SSE-32-NEXT: movd %eax, %xmm2 -; SSE-32-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,0,1,1] +; SSE-32-NEXT: pshufd {{.*#+}} xmm2 = 
xmm2[0,0,0,0] ; SSE-32-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm2[0] ; SSE-32-NEXT: pand %xmm3, %xmm0 ; SSE-32-NEXT: pandn %xmm1, %xmm3 @@ -2680,13 +2680,13 @@ define <2 x i64> @test_v2f64_ugt_q(<2 x i64> %a, <2 x i64> %b, <2 x double> %f1, ; SSE-32-NEXT: movl $0, %edx ; SSE-32-NEXT: cmovbl %ecx, %edx ; SSE-32-NEXT: movd %edx, %xmm3 -; SSE-32-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,0,1,1] +; SSE-32-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,0,0,0] ; SSE-32-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1,1] ; SSE-32-NEXT: unpckhpd {{.*#+}} xmm4 = xmm4[1,1] ; SSE-32-NEXT: ucomisd %xmm2, %xmm4 ; SSE-32-NEXT: cmovbl %ecx, %eax ; SSE-32-NEXT: movd %eax, %xmm2 -; SSE-32-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,0,1,1] +; SSE-32-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,0,0,0] ; SSE-32-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm2[0] ; SSE-32-NEXT: pand %xmm3, %xmm0 ; SSE-32-NEXT: pandn %xmm1, %xmm3 @@ -2798,13 +2798,13 @@ define <2 x i64> @test_v2f64_uge_q(<2 x i64> %a, <2 x i64> %b, <2 x double> %f1, ; SSE-32-NEXT: movl $0, %edx ; SSE-32-NEXT: cmovbel %ecx, %edx ; SSE-32-NEXT: movd %edx, %xmm3 -; SSE-32-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,0,1,1] +; SSE-32-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,0,0,0] ; SSE-32-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1,1] ; SSE-32-NEXT: unpckhpd {{.*#+}} xmm4 = xmm4[1,1] ; SSE-32-NEXT: ucomisd %xmm2, %xmm4 ; SSE-32-NEXT: cmovbel %ecx, %eax ; SSE-32-NEXT: movd %eax, %xmm2 -; SSE-32-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,0,1,1] +; SSE-32-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,0,0,0] ; SSE-32-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm2[0] ; SSE-32-NEXT: pand %xmm3, %xmm0 ; SSE-32-NEXT: pandn %xmm1, %xmm3 @@ -2916,13 +2916,13 @@ define <2 x i64> @test_v2f64_ult_q(<2 x i64> %a, <2 x i64> %b, <2 x double> %f1, ; SSE-32-NEXT: movl $0, %edx ; SSE-32-NEXT: cmovbl %ecx, %edx ; SSE-32-NEXT: movd %edx, %xmm3 -; SSE-32-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,0,1,1] +; SSE-32-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,0,0,0] ; SSE-32-NEXT: unpckhpd {{.*#+}} xmm4 = xmm4[1,1] ; SSE-32-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1,1] ; SSE-32-NEXT: ucomisd %xmm4, %xmm2 ; SSE-32-NEXT: cmovbl %ecx, %eax ; SSE-32-NEXT: movd %eax, %xmm2 -; SSE-32-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,0,1,1] +; SSE-32-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,0,0,0] ; SSE-32-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm2[0] ; SSE-32-NEXT: pand %xmm3, %xmm0 ; SSE-32-NEXT: pandn %xmm1, %xmm3 @@ -3036,13 +3036,13 @@ define <2 x i64> @test_v2f64_ule_q(<2 x i64> %a, <2 x i64> %b, <2 x double> %f1, ; SSE-32-NEXT: movl $0, %edx ; SSE-32-NEXT: cmovbel %ecx, %edx ; SSE-32-NEXT: movd %edx, %xmm3 -; SSE-32-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,0,1,1] +; SSE-32-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,0,0,0] ; SSE-32-NEXT: unpckhpd {{.*#+}} xmm4 = xmm4[1,1] ; SSE-32-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1,1] ; SSE-32-NEXT: ucomisd %xmm4, %xmm2 ; SSE-32-NEXT: cmovbel %ecx, %eax ; SSE-32-NEXT: movd %eax, %xmm2 -; SSE-32-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,0,1,1] +; SSE-32-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,0,0,0] ; SSE-32-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm2[0] ; SSE-32-NEXT: pand %xmm3, %xmm0 ; SSE-32-NEXT: pandn %xmm1, %xmm3 diff --git a/llvm/test/CodeGen/X86/vector-fshl-rot-sub128.ll b/llvm/test/CodeGen/X86/vector-fshl-rot-sub128.ll index 9ecc629..b378dce 100644 --- a/llvm/test/CodeGen/X86/vector-fshl-rot-sub128.ll +++ b/llvm/test/CodeGen/X86/vector-fshl-rot-sub128.ll @@ -162,7 +162,7 @@ define <2 x i32> @var_funnnel_v2i32(<2 x i32> %x, <2 x i32> %amt) nounwind { define <2 x i32> @splatvar_funnnel_v2i32(<2 x i32> %x, <2 x i32> %amt) nounwind { ; SSE2-LABEL: 
splatvar_funnnel_v2i32: ; SSE2: # %bb.0: -; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,1,1] +; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,0,0] ; SSE2-NEXT: pslld $23, %xmm1 ; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 ; SSE2-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 @@ -182,7 +182,7 @@ define <2 x i32> @splatvar_funnnel_v2i32(<2 x i32> %x, <2 x i32> %amt) nounwind ; ; SSE41-LABEL: splatvar_funnnel_v2i32: ; SSE41: # %bb.0: -; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,1,1] +; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,0,0] ; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3] ; SSE41-NEXT: pslld $23, %xmm1 ; SSE41-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 @@ -200,7 +200,7 @@ define <2 x i32> @splatvar_funnnel_v2i32(<2 x i32> %x, <2 x i32> %amt) nounwind ; ; AVX1-LABEL: splatvar_funnnel_v2i32: ; AVX1: # %bb.0: -; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,1,1] +; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,0,0] ; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[1,1,3,3] ; AVX1-NEXT: vpslld $23, %xmm1, %xmm1 ; AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 @@ -277,7 +277,7 @@ define <2 x i32> @splatvar_funnnel_v2i32(<2 x i32> %x, <2 x i32> %amt) nounwind ; ; XOPAVX1-LABEL: splatvar_funnnel_v2i32: ; XOPAVX1: # %bb.0: -; XOPAVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,1,1] +; XOPAVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,0,0] ; XOPAVX1-NEXT: vprotd %xmm1, %xmm0, %xmm0 ; XOPAVX1-NEXT: retq ; @@ -289,7 +289,7 @@ define <2 x i32> @splatvar_funnnel_v2i32(<2 x i32> %x, <2 x i32> %amt) nounwind ; ; X86-SSE2-LABEL: splatvar_funnnel_v2i32: ; X86-SSE2: # %bb.0: -; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,1,1] +; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,0,0] ; X86-SSE2-NEXT: pslld $23, %xmm1 ; X86-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1 ; X86-SSE2-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1 diff --git a/llvm/test/CodeGen/X86/vector-fshl-sub128.ll b/llvm/test/CodeGen/X86/vector-fshl-sub128.ll index 322ebe2..06ff7e7 100644 --- a/llvm/test/CodeGen/X86/vector-fshl-sub128.ll +++ b/llvm/test/CodeGen/X86/vector-fshl-sub128.ll @@ -250,7 +250,7 @@ define <2 x i32> @var_funnnel_v2i32(<2 x i32> %x, <2 x i32> %y, <2 x i32> %amt) define <2 x i32> @splatvar_funnnel_v2i32(<2 x i32> %x, <2 x i32> %y, <2 x i32> %amt) nounwind { ; SSE2-LABEL: splatvar_funnnel_v2i32: ; SSE2: # %bb.0: -; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm2[0,0,1,1] +; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm2[0,0,0,0] ; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [31,31,31,31] ; SSE2-NEXT: movdqa %xmm3, %xmm5 ; SSE2-NEXT: pandn %xmm4, %xmm5 @@ -286,7 +286,7 @@ define <2 x i32> @splatvar_funnnel_v2i32(<2 x i32> %x, <2 x i32> %y, <2 x i32> % ; ; SSE41-LABEL: splatvar_funnnel_v2i32: ; SSE41: # %bb.0: -; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,0,1,1] +; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,0,0,0] ; SSE41-NEXT: pmovsxbd {{.*#+}} xmm3 = [31,31,31,31] ; SSE41-NEXT: movdqa %xmm2, %xmm4 ; SSE41-NEXT: pandn %xmm3, %xmm4 @@ -316,7 +316,7 @@ define <2 x i32> @splatvar_funnnel_v2i32(<2 x i32> %x, <2 x i32> %y, <2 x i32> % ; ; AVX1-LABEL: splatvar_funnnel_v2i32: ; AVX1: # %bb.0: -; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,0,1,1] +; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,0,0,0] ; AVX1-NEXT: vbroadcastss {{.*#+}} xmm3 = [31,31,31,31] ; AVX1-NEXT: vpandn %xmm3, %xmm2, %xmm4 ; AVX1-NEXT: vpsrldq {{.*#+}} xmm5 = xmm4[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero @@ -423,7 +423,7 @@ define <2 x i32> @splatvar_funnnel_v2i32(<2 x i32> %x, <2 x i32> %y, <2 x i32> % ; ; XOPAVX1-LABEL: 
splatvar_funnnel_v2i32: ; XOPAVX1: # %bb.0: -; XOPAVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,0,1,1] +; XOPAVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,0,0,0] ; XOPAVX1-NEXT: vbroadcastss {{.*#+}} xmm3 = [31,31,31,31] ; XOPAVX1-NEXT: vpand %xmm3, %xmm2, %xmm4 ; XOPAVX1-NEXT: vpshld %xmm4, %xmm0, %xmm0 @@ -450,7 +450,7 @@ define <2 x i32> @splatvar_funnnel_v2i32(<2 x i32> %x, <2 x i32> %y, <2 x i32> % ; ; X86-SSE2-LABEL: splatvar_funnnel_v2i32: ; X86-SSE2: # %bb.0: -; X86-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm2[0,0,1,1] +; X86-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm2[0,0,0,0] ; X86-SSE2-NEXT: movdqa {{.*#+}} xmm4 = [31,31,31,31] ; X86-SSE2-NEXT: movdqa %xmm3, %xmm5 ; X86-SSE2-NEXT: pandn %xmm4, %xmm5 diff --git a/llvm/test/CodeGen/X86/vector-fshr-128.ll b/llvm/test/CodeGen/X86/vector-fshr-128.ll index 20be579..9b52857 100644 --- a/llvm/test/CodeGen/X86/vector-fshr-128.ll +++ b/llvm/test/CodeGen/X86/vector-fshr-128.ll @@ -536,14 +536,14 @@ define <8 x i16> @var_funnnel_v8i16(<8 x i16> %x, <8 x i16> %y, <8 x i16> %amt) ; AVX1-NEXT: vpaddw %xmm4, %xmm4, %xmm5 ; AVX1-NEXT: vpsrlw $8, %xmm1, %xmm6 ; AVX1-NEXT: vpblendvb %xmm4, %xmm6, %xmm1, %xmm1 -; AVX1-NEXT: vpsrlw $4, %xmm1, %xmm4 -; AVX1-NEXT: vpblendvb %xmm5, %xmm4, %xmm1, %xmm1 -; AVX1-NEXT: vpsrlw $2, %xmm1, %xmm4 -; AVX1-NEXT: vpaddw %xmm5, %xmm5, %xmm5 -; AVX1-NEXT: vpblendvb %xmm5, %xmm4, %xmm1, %xmm1 -; AVX1-NEXT: vpsrlw $1, %xmm1, %xmm4 -; AVX1-NEXT: vpaddw %xmm5, %xmm5, %xmm5 -; AVX1-NEXT: vpblendvb %xmm5, %xmm4, %xmm1, %xmm1 +; AVX1-NEXT: vpsrlw $4, %xmm1, %xmm6 +; AVX1-NEXT: vpblendvb %xmm5, %xmm6, %xmm1, %xmm1 +; AVX1-NEXT: vpsrlw $2, %xmm1, %xmm5 +; AVX1-NEXT: vpsllw $2, %xmm4, %xmm6 +; AVX1-NEXT: vpblendvb %xmm6, %xmm5, %xmm1, %xmm1 +; AVX1-NEXT: vpsrlw $1, %xmm1, %xmm5 +; AVX1-NEXT: vpsllw $3, %xmm4, %xmm4 +; AVX1-NEXT: vpblendvb %xmm4, %xmm5, %xmm1, %xmm1 ; AVX1-NEXT: vpandn %xmm3, %xmm2, %xmm2 ; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm3 = xmm2[4,4,5,5,6,6,7,7] ; AVX1-NEXT: vpslld $23, %xmm3, %xmm3 diff --git a/llvm/test/CodeGen/X86/vector-fshr-256.ll b/llvm/test/CodeGen/X86/vector-fshr-256.ll index 1f16463..a387562 100644 --- a/llvm/test/CodeGen/X86/vector-fshr-256.ll +++ b/llvm/test/CodeGen/X86/vector-fshr-256.ll @@ -328,15 +328,15 @@ define <16 x i16> @var_funnnel_v16i16(<16 x i16> %x, <16 x i16> %y, <16 x i16> % ; AVX1-NEXT: vpaddw %xmm5, %xmm5, %xmm6 ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm7 ; AVX1-NEXT: vpsrlw $8, %xmm7, %xmm8 -; AVX1-NEXT: vpblendvb %xmm5, %xmm8, %xmm7, %xmm5 -; AVX1-NEXT: vpsrlw $4, %xmm5, %xmm7 -; AVX1-NEXT: vpblendvb %xmm6, %xmm7, %xmm5, %xmm5 -; AVX1-NEXT: vpsrlw $2, %xmm5, %xmm7 -; AVX1-NEXT: vpaddw %xmm6, %xmm6, %xmm6 -; AVX1-NEXT: vpblendvb %xmm6, %xmm7, %xmm5, %xmm5 -; AVX1-NEXT: vpsrlw $1, %xmm5, %xmm7 -; AVX1-NEXT: vpaddw %xmm6, %xmm6, %xmm6 -; AVX1-NEXT: vpblendvb %xmm6, %xmm7, %xmm5, %xmm5 +; AVX1-NEXT: vpblendvb %xmm5, %xmm8, %xmm7, %xmm7 +; AVX1-NEXT: vpsrlw $4, %xmm7, %xmm8 +; AVX1-NEXT: vpblendvb %xmm6, %xmm8, %xmm7, %xmm6 +; AVX1-NEXT: vpsrlw $2, %xmm6, %xmm7 +; AVX1-NEXT: vpsllw $2, %xmm5, %xmm8 +; AVX1-NEXT: vpblendvb %xmm8, %xmm7, %xmm6, %xmm6 +; AVX1-NEXT: vpsrlw $1, %xmm6, %xmm7 +; AVX1-NEXT: vpsllw $3, %xmm5, %xmm5 +; AVX1-NEXT: vpblendvb %xmm5, %xmm7, %xmm6, %xmm5 ; AVX1-NEXT: vpxor %xmm3, %xmm4, %xmm6 ; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm4 = xmm6[4,4,5,5,6,6,7,7] ; AVX1-NEXT: vpslld $23, %xmm4, %xmm7 @@ -358,14 +358,14 @@ define <16 x i16> @var_funnnel_v16i16(<16 x i16> %x, <16 x i16> %y, <16 x i16> % ; AVX1-NEXT: vpaddw %xmm6, %xmm6, %xmm7 ; AVX1-NEXT: vpsrlw $8, %xmm1, %xmm8 ; AVX1-NEXT: 
vpblendvb %xmm6, %xmm8, %xmm1, %xmm1 -; AVX1-NEXT: vpsrlw $4, %xmm1, %xmm6 -; AVX1-NEXT: vpblendvb %xmm7, %xmm6, %xmm1, %xmm1 -; AVX1-NEXT: vpsrlw $2, %xmm1, %xmm6 -; AVX1-NEXT: vpaddw %xmm7, %xmm7, %xmm7 -; AVX1-NEXT: vpblendvb %xmm7, %xmm6, %xmm1, %xmm1 -; AVX1-NEXT: vpsrlw $1, %xmm1, %xmm6 -; AVX1-NEXT: vpaddw %xmm7, %xmm7, %xmm7 -; AVX1-NEXT: vpblendvb %xmm7, %xmm6, %xmm1, %xmm1 +; AVX1-NEXT: vpsrlw $4, %xmm1, %xmm8 +; AVX1-NEXT: vpblendvb %xmm7, %xmm8, %xmm1, %xmm1 +; AVX1-NEXT: vpsrlw $2, %xmm1, %xmm7 +; AVX1-NEXT: vpsllw $2, %xmm6, %xmm8 +; AVX1-NEXT: vpblendvb %xmm8, %xmm7, %xmm1, %xmm1 +; AVX1-NEXT: vpsrlw $1, %xmm1, %xmm7 +; AVX1-NEXT: vpsllw $3, %xmm6, %xmm6 +; AVX1-NEXT: vpblendvb %xmm6, %xmm7, %xmm1, %xmm1 ; AVX1-NEXT: vpxor %xmm3, %xmm2, %xmm2 ; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm3 = xmm2[4,4,5,5,6,6,7,7] ; AVX1-NEXT: vpslld $23, %xmm3, %xmm3 diff --git a/llvm/test/CodeGen/X86/vector-fshr-rot-sub128.ll b/llvm/test/CodeGen/X86/vector-fshr-rot-sub128.ll index 178c02f..ef5ffe4 100644 --- a/llvm/test/CodeGen/X86/vector-fshr-rot-sub128.ll +++ b/llvm/test/CodeGen/X86/vector-fshr-rot-sub128.ll @@ -172,7 +172,7 @@ define <2 x i32> @var_funnnel_v2i32(<2 x i32> %x, <2 x i32> %amt) nounwind { define <2 x i32> @splatvar_funnnel_v2i32(<2 x i32> %x, <2 x i32> %amt) nounwind { ; SSE2-LABEL: splatvar_funnnel_v2i32: ; SSE2: # %bb.0: -; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,1,1] +; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,0,0] ; SSE2-NEXT: pxor %xmm2, %xmm2 ; SSE2-NEXT: psubd %xmm1, %xmm2 ; SSE2-NEXT: pslld $23, %xmm2 @@ -194,7 +194,7 @@ define <2 x i32> @splatvar_funnnel_v2i32(<2 x i32> %x, <2 x i32> %amt) nounwind ; ; SSE41-LABEL: splatvar_funnnel_v2i32: ; SSE41: # %bb.0: -; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,1,1] +; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,0,0] ; SSE41-NEXT: pxor %xmm2, %xmm2 ; SSE41-NEXT: psubd %xmm1, %xmm2 ; SSE41-NEXT: pslld $23, %xmm2 @@ -214,7 +214,7 @@ define <2 x i32> @splatvar_funnnel_v2i32(<2 x i32> %x, <2 x i32> %amt) nounwind ; ; AVX1-LABEL: splatvar_funnnel_v2i32: ; AVX1: # %bb.0: -; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,1,1] +; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,0,0] ; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2 ; AVX1-NEXT: vpsubd %xmm1, %xmm2, %xmm1 ; AVX1-NEXT: vpslld $23, %xmm1, %xmm1 @@ -293,7 +293,7 @@ define <2 x i32> @splatvar_funnnel_v2i32(<2 x i32> %x, <2 x i32> %amt) nounwind ; ; XOPAVX1-LABEL: splatvar_funnnel_v2i32: ; XOPAVX1: # %bb.0: -; XOPAVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,1,1] +; XOPAVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,0,0] ; XOPAVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2 ; XOPAVX1-NEXT: vpsubd %xmm1, %xmm2, %xmm1 ; XOPAVX1-NEXT: vprotd %xmm1, %xmm0, %xmm0 @@ -309,7 +309,7 @@ define <2 x i32> @splatvar_funnnel_v2i32(<2 x i32> %x, <2 x i32> %amt) nounwind ; ; X86-SSE2-LABEL: splatvar_funnnel_v2i32: ; X86-SSE2: # %bb.0: -; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,1,1] +; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,0,0] ; X86-SSE2-NEXT: pxor %xmm2, %xmm2 ; X86-SSE2-NEXT: psubd %xmm1, %xmm2 ; X86-SSE2-NEXT: pslld $23, %xmm2 diff --git a/llvm/test/CodeGen/X86/vector-fshr-sub128.ll b/llvm/test/CodeGen/X86/vector-fshr-sub128.ll index 372deb05..2d8670a 100644 --- a/llvm/test/CodeGen/X86/vector-fshr-sub128.ll +++ b/llvm/test/CodeGen/X86/vector-fshr-sub128.ll @@ -251,7 +251,7 @@ define <2 x i32> @var_funnnel_v2i32(<2 x i32> %x, <2 x i32> %y, <2 x i32> %amt) define <2 x i32> @splatvar_funnnel_v2i32(<2 x i32> %x, <2 x i32> %y, <2 x i32> %amt) nounwind { ; SSE2-LABEL: splatvar_funnnel_v2i32: ; SSE2: # %bb.0: -; 
SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm2[0,0,1,1] +; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm2[0,0,0,0] ; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [31,31,31,31] ; SSE2-NEXT: movdqa %xmm3, %xmm5 ; SSE2-NEXT: pand %xmm4, %xmm5 @@ -287,7 +287,7 @@ define <2 x i32> @splatvar_funnnel_v2i32(<2 x i32> %x, <2 x i32> %y, <2 x i32> % ; ; SSE41-LABEL: splatvar_funnnel_v2i32: ; SSE41: # %bb.0: -; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,0,1,1] +; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,0,0,0] ; SSE41-NEXT: pmovsxbd {{.*#+}} xmm3 = [31,31,31,31] ; SSE41-NEXT: movdqa %xmm2, %xmm4 ; SSE41-NEXT: pand %xmm3, %xmm4 @@ -317,7 +317,7 @@ define <2 x i32> @splatvar_funnnel_v2i32(<2 x i32> %x, <2 x i32> %y, <2 x i32> % ; ; AVX1-LABEL: splatvar_funnnel_v2i32: ; AVX1: # %bb.0: -; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,0,1,1] +; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,0,0,0] ; AVX1-NEXT: vbroadcastss {{.*#+}} xmm3 = [31,31,31,31] ; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm4 ; AVX1-NEXT: vpsrldq {{.*#+}} xmm5 = xmm4[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero @@ -425,7 +425,7 @@ define <2 x i32> @splatvar_funnnel_v2i32(<2 x i32> %x, <2 x i32> %y, <2 x i32> % ; ; XOPAVX1-LABEL: splatvar_funnnel_v2i32: ; XOPAVX1: # %bb.0: -; XOPAVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,0,1,1] +; XOPAVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,0,0,0] ; XOPAVX1-NEXT: vbroadcastss {{.*#+}} xmm3 = [31,31,31,31] ; XOPAVX1-NEXT: vpandn %xmm3, %xmm2, %xmm4 ; XOPAVX1-NEXT: vpaddd %xmm0, %xmm0, %xmm0 @@ -452,7 +452,7 @@ define <2 x i32> @splatvar_funnnel_v2i32(<2 x i32> %x, <2 x i32> %y, <2 x i32> % ; ; X86-SSE2-LABEL: splatvar_funnnel_v2i32: ; X86-SSE2: # %bb.0: -; X86-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm2[0,0,1,1] +; X86-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm2[0,0,0,0] ; X86-SSE2-NEXT: movdqa {{.*#+}} xmm4 = [31,31,31,31] ; X86-SSE2-NEXT: movdqa %xmm3, %xmm5 ; X86-SSE2-NEXT: pand %xmm4, %xmm5 diff --git a/llvm/test/CodeGen/X86/vector-sext.ll b/llvm/test/CodeGen/X86/vector-sext.ll index f57efb4..1e11ea9 100644 --- a/llvm/test/CodeGen/X86/vector-sext.ll +++ b/llvm/test/CodeGen/X86/vector-sext.ll @@ -1409,11 +1409,11 @@ define <2 x i64> @load_sext_2i1_to_2i64(ptr%ptr) { ; X86-SSE2-NEXT: movzbl %al, %eax ; X86-SSE2-NEXT: negl %eax ; X86-SSE2-NEXT: movd %eax, %xmm0 -; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,1,1] +; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,0,0] ; X86-SSE2-NEXT: andl $1, %ecx ; X86-SSE2-NEXT: negl %ecx ; X86-SSE2-NEXT: movd %ecx, %xmm0 -; X86-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1] +; X86-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0] ; X86-SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] ; X86-SSE2-NEXT: retl ; diff --git a/llvm/test/CodeGen/X86/vector-shift-ashr-128.ll b/llvm/test/CodeGen/X86/vector-shift-ashr-128.ll index 02f0f53..d565ef0 100644 --- a/llvm/test/CodeGen/X86/vector-shift-ashr-128.ll +++ b/llvm/test/CodeGen/X86/vector-shift-ashr-128.ll @@ -293,14 +293,14 @@ define <8 x i16> @var_shift_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind { ; AVX1-NEXT: vpaddw %xmm1, %xmm1, %xmm2 ; AVX1-NEXT: vpsraw $8, %xmm0, %xmm3 ; AVX1-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0 -; AVX1-NEXT: vpsraw $4, %xmm0, %xmm1 -; AVX1-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0 -; AVX1-NEXT: vpsraw $2, %xmm0, %xmm1 -; AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm2 -; AVX1-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0 -; AVX1-NEXT: vpsraw $1, %xmm0, %xmm1 -; AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm2 -; AVX1-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0 +; AVX1-NEXT: vpsraw $4, %xmm0, %xmm3 +; AVX1-NEXT: vpblendvb %xmm2, %xmm3, 
%xmm0, %xmm0 +; AVX1-NEXT: vpsraw $2, %xmm0, %xmm2 +; AVX1-NEXT: vpsllw $2, %xmm1, %xmm3 +; AVX1-NEXT: vpblendvb %xmm3, %xmm2, %xmm0, %xmm0 +; AVX1-NEXT: vpsraw $1, %xmm0, %xmm2 +; AVX1-NEXT: vpsllw $3, %xmm1, %xmm1 +; AVX1-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0 ; AVX1-NEXT: retq ; ; AVX2-LABEL: var_shift_v8i16: @@ -494,10 +494,10 @@ define <16 x i8> @var_shift_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind { ; AVX-NEXT: vpsraw $4, %xmm3, %xmm4 ; AVX-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm3 ; AVX-NEXT: vpsraw $2, %xmm3, %xmm4 -; AVX-NEXT: vpaddw %xmm2, %xmm2, %xmm2 -; AVX-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm3 +; AVX-NEXT: vpaddw %xmm2, %xmm2, %xmm5 +; AVX-NEXT: vpblendvb %xmm5, %xmm4, %xmm3, %xmm3 ; AVX-NEXT: vpsraw $1, %xmm3, %xmm4 -; AVX-NEXT: vpaddw %xmm2, %xmm2, %xmm2 +; AVX-NEXT: vpsllw $2, %xmm2, %xmm2 ; AVX-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm2 ; AVX-NEXT: vpsrlw $8, %xmm2, %xmm2 ; AVX-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] @@ -505,10 +505,10 @@ define <16 x i8> @var_shift_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind { ; AVX-NEXT: vpsraw $4, %xmm0, %xmm3 ; AVX-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0 ; AVX-NEXT: vpsraw $2, %xmm0, %xmm3 -; AVX-NEXT: vpaddw %xmm1, %xmm1, %xmm1 -; AVX-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0 +; AVX-NEXT: vpaddw %xmm1, %xmm1, %xmm4 +; AVX-NEXT: vpblendvb %xmm4, %xmm3, %xmm0, %xmm0 ; AVX-NEXT: vpsraw $1, %xmm0, %xmm3 -; AVX-NEXT: vpaddw %xmm1, %xmm1, %xmm1 +; AVX-NEXT: vpsllw $2, %xmm1, %xmm1 ; AVX-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0 ; AVX-NEXT: vpsrlw $8, %xmm0, %xmm0 ; AVX-NEXT: vpackuswb %xmm2, %xmm0, %xmm0 diff --git a/llvm/test/CodeGen/X86/vector-shift-ashr-256.ll b/llvm/test/CodeGen/X86/vector-shift-ashr-256.ll index 15855e3..249bcba 100644 --- a/llvm/test/CodeGen/X86/vector-shift-ashr-256.ll +++ b/llvm/test/CodeGen/X86/vector-shift-ashr-256.ll @@ -237,29 +237,29 @@ define <16 x i16> @var_shift_v16i16(<16 x i16> %a, <16 x i16> %b) nounwind { ; AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm3 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4 ; AVX1-NEXT: vpsraw $8, %xmm4, %xmm5 -; AVX1-NEXT: vpblendvb %xmm2, %xmm5, %xmm4, %xmm2 -; AVX1-NEXT: vpsraw $4, %xmm2, %xmm4 -; AVX1-NEXT: vpblendvb %xmm3, %xmm4, %xmm2, %xmm2 -; AVX1-NEXT: vpsraw $2, %xmm2, %xmm4 -; AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3 -; AVX1-NEXT: vpblendvb %xmm3, %xmm4, %xmm2, %xmm2 -; AVX1-NEXT: vpsraw $1, %xmm2, %xmm4 -; AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3 -; AVX1-NEXT: vpblendvb %xmm3, %xmm4, %xmm2, %xmm2 +; AVX1-NEXT: vpblendvb %xmm2, %xmm5, %xmm4, %xmm4 +; AVX1-NEXT: vpsraw $4, %xmm4, %xmm5 +; AVX1-NEXT: vpblendvb %xmm3, %xmm5, %xmm4, %xmm3 +; AVX1-NEXT: vpsraw $2, %xmm3, %xmm4 +; AVX1-NEXT: vpsllw $2, %xmm2, %xmm5 +; AVX1-NEXT: vpblendvb %xmm5, %xmm4, %xmm3, %xmm3 +; AVX1-NEXT: vpsraw $1, %xmm3, %xmm4 +; AVX1-NEXT: vpsllw $3, %xmm2, %xmm2 +; AVX1-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm2 ; AVX1-NEXT: vpsllw $12, %xmm1, %xmm3 ; AVX1-NEXT: vpsllw $4, %xmm1, %xmm1 ; AVX1-NEXT: vpor %xmm3, %xmm1, %xmm1 ; AVX1-NEXT: vpaddw %xmm1, %xmm1, %xmm3 ; AVX1-NEXT: vpsraw $8, %xmm0, %xmm4 ; AVX1-NEXT: vpblendvb %xmm1, %xmm4, %xmm0, %xmm0 -; AVX1-NEXT: vpsraw $4, %xmm0, %xmm1 -; AVX1-NEXT: vpblendvb %xmm3, %xmm1, %xmm0, %xmm0 -; AVX1-NEXT: vpsraw $2, %xmm0, %xmm1 -; AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3 -; AVX1-NEXT: vpblendvb %xmm3, %xmm1, %xmm0, %xmm0 -; AVX1-NEXT: vpsraw $1, %xmm0, %xmm1 -; AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3 -; AVX1-NEXT: vpblendvb %xmm3, %xmm1, %xmm0, %xmm0 +; AVX1-NEXT: vpsraw $4, %xmm0, %xmm4 +; AVX1-NEXT: vpblendvb %xmm3, %xmm4, %xmm0, 
%xmm0 +; AVX1-NEXT: vpsraw $2, %xmm0, %xmm3 +; AVX1-NEXT: vpsllw $2, %xmm1, %xmm4 +; AVX1-NEXT: vpblendvb %xmm4, %xmm3, %xmm0, %xmm0 +; AVX1-NEXT: vpsraw $1, %xmm0, %xmm3 +; AVX1-NEXT: vpsllw $3, %xmm1, %xmm1 +; AVX1-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0 ; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0 ; AVX1-NEXT: retq ; @@ -339,29 +339,29 @@ define <16 x i16> @var_shift_v16i16(<16 x i16> %a, <16 x i16> %b) nounwind { ; X86-AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm3 ; X86-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4 ; X86-AVX1-NEXT: vpsraw $8, %xmm4, %xmm5 -; X86-AVX1-NEXT: vpblendvb %xmm2, %xmm5, %xmm4, %xmm2 -; X86-AVX1-NEXT: vpsraw $4, %xmm2, %xmm4 -; X86-AVX1-NEXT: vpblendvb %xmm3, %xmm4, %xmm2, %xmm2 -; X86-AVX1-NEXT: vpsraw $2, %xmm2, %xmm4 -; X86-AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3 -; X86-AVX1-NEXT: vpblendvb %xmm3, %xmm4, %xmm2, %xmm2 -; X86-AVX1-NEXT: vpsraw $1, %xmm2, %xmm4 -; X86-AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3 -; X86-AVX1-NEXT: vpblendvb %xmm3, %xmm4, %xmm2, %xmm2 +; X86-AVX1-NEXT: vpblendvb %xmm2, %xmm5, %xmm4, %xmm4 +; X86-AVX1-NEXT: vpsraw $4, %xmm4, %xmm5 +; X86-AVX1-NEXT: vpblendvb %xmm3, %xmm5, %xmm4, %xmm3 +; X86-AVX1-NEXT: vpsraw $2, %xmm3, %xmm4 +; X86-AVX1-NEXT: vpsllw $2, %xmm2, %xmm5 +; X86-AVX1-NEXT: vpblendvb %xmm5, %xmm4, %xmm3, %xmm3 +; X86-AVX1-NEXT: vpsraw $1, %xmm3, %xmm4 +; X86-AVX1-NEXT: vpsllw $3, %xmm2, %xmm2 +; X86-AVX1-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm2 ; X86-AVX1-NEXT: vpsllw $12, %xmm1, %xmm3 ; X86-AVX1-NEXT: vpsllw $4, %xmm1, %xmm1 ; X86-AVX1-NEXT: vpor %xmm3, %xmm1, %xmm1 ; X86-AVX1-NEXT: vpaddw %xmm1, %xmm1, %xmm3 ; X86-AVX1-NEXT: vpsraw $8, %xmm0, %xmm4 ; X86-AVX1-NEXT: vpblendvb %xmm1, %xmm4, %xmm0, %xmm0 -; X86-AVX1-NEXT: vpsraw $4, %xmm0, %xmm1 -; X86-AVX1-NEXT: vpblendvb %xmm3, %xmm1, %xmm0, %xmm0 -; X86-AVX1-NEXT: vpsraw $2, %xmm0, %xmm1 -; X86-AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3 -; X86-AVX1-NEXT: vpblendvb %xmm3, %xmm1, %xmm0, %xmm0 -; X86-AVX1-NEXT: vpsraw $1, %xmm0, %xmm1 -; X86-AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3 -; X86-AVX1-NEXT: vpblendvb %xmm3, %xmm1, %xmm0, %xmm0 +; X86-AVX1-NEXT: vpsraw $4, %xmm0, %xmm4 +; X86-AVX1-NEXT: vpblendvb %xmm3, %xmm4, %xmm0, %xmm0 +; X86-AVX1-NEXT: vpsraw $2, %xmm0, %xmm3 +; X86-AVX1-NEXT: vpsllw $2, %xmm1, %xmm4 +; X86-AVX1-NEXT: vpblendvb %xmm4, %xmm3, %xmm0, %xmm0 +; X86-AVX1-NEXT: vpsraw $1, %xmm0, %xmm3 +; X86-AVX1-NEXT: vpsllw $3, %xmm1, %xmm1 +; X86-AVX1-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0 ; X86-AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0 ; X86-AVX1-NEXT: retl ; @@ -393,10 +393,10 @@ define <32 x i8> @var_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind { ; AVX1-NEXT: vpsraw $4, %xmm5, %xmm6 ; AVX1-NEXT: vpblendvb %xmm3, %xmm6, %xmm5, %xmm5 ; AVX1-NEXT: vpsraw $2, %xmm5, %xmm6 -; AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3 -; AVX1-NEXT: vpblendvb %xmm3, %xmm6, %xmm5, %xmm5 +; AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm7 +; AVX1-NEXT: vpblendvb %xmm7, %xmm6, %xmm5, %xmm5 ; AVX1-NEXT: vpsraw $1, %xmm5, %xmm6 -; AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3 +; AVX1-NEXT: vpsllw $2, %xmm3, %xmm3 ; AVX1-NEXT: vpblendvb %xmm3, %xmm6, %xmm5, %xmm3 ; AVX1-NEXT: vpsrlw $8, %xmm3, %xmm3 ; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] @@ -404,10 +404,10 @@ define <32 x i8> @var_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind { ; AVX1-NEXT: vpsraw $4, %xmm4, %xmm5 ; AVX1-NEXT: vpblendvb %xmm2, %xmm5, %xmm4, %xmm4 ; AVX1-NEXT: vpsraw $2, %xmm4, %xmm5 -; AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm2 -; AVX1-NEXT: vpblendvb %xmm2, %xmm5, %xmm4, %xmm4 +; AVX1-NEXT: vpaddw %xmm2, %xmm2, 
%xmm6 +; AVX1-NEXT: vpblendvb %xmm6, %xmm5, %xmm4, %xmm4 ; AVX1-NEXT: vpsraw $1, %xmm4, %xmm5 -; AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm2 +; AVX1-NEXT: vpsllw $2, %xmm2, %xmm2 ; AVX1-NEXT: vpblendvb %xmm2, %xmm5, %xmm4, %xmm2 ; AVX1-NEXT: vpsrlw $8, %xmm2, %xmm2 ; AVX1-NEXT: vpackuswb %xmm3, %xmm2, %xmm2 @@ -417,10 +417,10 @@ define <32 x i8> @var_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind { ; AVX1-NEXT: vpsraw $4, %xmm4, %xmm5 ; AVX1-NEXT: vpblendvb %xmm3, %xmm5, %xmm4, %xmm4 ; AVX1-NEXT: vpsraw $2, %xmm4, %xmm5 -; AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3 -; AVX1-NEXT: vpblendvb %xmm3, %xmm5, %xmm4, %xmm4 +; AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm6 +; AVX1-NEXT: vpblendvb %xmm6, %xmm5, %xmm4, %xmm4 ; AVX1-NEXT: vpsraw $1, %xmm4, %xmm5 -; AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3 +; AVX1-NEXT: vpsllw $2, %xmm3, %xmm3 ; AVX1-NEXT: vpblendvb %xmm3, %xmm5, %xmm4, %xmm3 ; AVX1-NEXT: vpsrlw $8, %xmm3, %xmm3 ; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] @@ -428,10 +428,10 @@ define <32 x i8> @var_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind { ; AVX1-NEXT: vpsraw $4, %xmm0, %xmm4 ; AVX1-NEXT: vpblendvb %xmm1, %xmm4, %xmm0, %xmm0 ; AVX1-NEXT: vpsraw $2, %xmm0, %xmm4 -; AVX1-NEXT: vpaddw %xmm1, %xmm1, %xmm1 -; AVX1-NEXT: vpblendvb %xmm1, %xmm4, %xmm0, %xmm0 +; AVX1-NEXT: vpaddw %xmm1, %xmm1, %xmm5 +; AVX1-NEXT: vpblendvb %xmm5, %xmm4, %xmm0, %xmm0 ; AVX1-NEXT: vpsraw $1, %xmm0, %xmm4 -; AVX1-NEXT: vpaddw %xmm1, %xmm1, %xmm1 +; AVX1-NEXT: vpsllw $2, %xmm1, %xmm1 ; AVX1-NEXT: vpblendvb %xmm1, %xmm4, %xmm0, %xmm0 ; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm0 ; AVX1-NEXT: vpackuswb %xmm3, %xmm0, %xmm0 @@ -446,10 +446,10 @@ define <32 x i8> @var_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind { ; AVX2-NEXT: vpsraw $4, %ymm3, %ymm4 ; AVX2-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm3 ; AVX2-NEXT: vpsraw $2, %ymm3, %ymm4 -; AVX2-NEXT: vpaddw %ymm2, %ymm2, %ymm2 -; AVX2-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm3 +; AVX2-NEXT: vpaddw %ymm2, %ymm2, %ymm5 +; AVX2-NEXT: vpblendvb %ymm5, %ymm4, %ymm3, %ymm3 ; AVX2-NEXT: vpsraw $1, %ymm3, %ymm4 -; AVX2-NEXT: vpaddw %ymm2, %ymm2, %ymm2 +; AVX2-NEXT: vpsllw $2, %ymm2, %ymm2 ; AVX2-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm2 ; AVX2-NEXT: vpsrlw $8, %ymm2, %ymm2 ; AVX2-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23] @@ -457,10 +457,10 @@ define <32 x i8> @var_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind { ; AVX2-NEXT: vpsraw $4, %ymm0, %ymm3 ; AVX2-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0 ; AVX2-NEXT: vpsraw $2, %ymm0, %ymm3 -; AVX2-NEXT: vpaddw %ymm1, %ymm1, %ymm1 -; AVX2-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0 +; AVX2-NEXT: vpaddw %ymm1, %ymm1, %ymm4 +; AVX2-NEXT: vpblendvb %ymm4, %ymm3, %ymm0, %ymm0 ; AVX2-NEXT: vpsraw $1, %ymm0, %ymm3 -; AVX2-NEXT: vpaddw %ymm1, %ymm1, %ymm1 +; AVX2-NEXT: vpsllw $2, %ymm1, %ymm1 ; AVX2-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0 ; AVX2-NEXT: vpsrlw $8, %ymm0, %ymm0 ; AVX2-NEXT: vpackuswb %ymm2, %ymm0, %ymm0 @@ -498,10 +498,10 @@ define <32 x i8> @var_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind { ; AVX512DQ-NEXT: vpsraw $4, %ymm3, %ymm4 ; AVX512DQ-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm3 ; AVX512DQ-NEXT: vpsraw $2, %ymm3, %ymm4 -; AVX512DQ-NEXT: vpaddw %ymm2, %ymm2, %ymm2 -; AVX512DQ-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm3 +; AVX512DQ-NEXT: vpaddw %ymm2, %ymm2, %ymm5 +; AVX512DQ-NEXT: vpblendvb %ymm5, %ymm4, %ymm3, %ymm3 ; AVX512DQ-NEXT: vpsraw $1, %ymm3, %ymm4 -; AVX512DQ-NEXT: vpaddw %ymm2, %ymm2, %ymm2 +; AVX512DQ-NEXT: 
vpsllw $2, %ymm2, %ymm2 ; AVX512DQ-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm2 ; AVX512DQ-NEXT: vpsrlw $8, %ymm2, %ymm2 ; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23] @@ -509,10 +509,10 @@ define <32 x i8> @var_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind { ; AVX512DQ-NEXT: vpsraw $4, %ymm0, %ymm3 ; AVX512DQ-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0 ; AVX512DQ-NEXT: vpsraw $2, %ymm0, %ymm3 -; AVX512DQ-NEXT: vpaddw %ymm1, %ymm1, %ymm1 -; AVX512DQ-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0 +; AVX512DQ-NEXT: vpaddw %ymm1, %ymm1, %ymm4 +; AVX512DQ-NEXT: vpblendvb %ymm4, %ymm3, %ymm0, %ymm0 ; AVX512DQ-NEXT: vpsraw $1, %ymm0, %ymm3 -; AVX512DQ-NEXT: vpaddw %ymm1, %ymm1, %ymm1 +; AVX512DQ-NEXT: vpsllw $2, %ymm1, %ymm1 ; AVX512DQ-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0 ; AVX512DQ-NEXT: vpsrlw $8, %ymm0, %ymm0 ; AVX512DQ-NEXT: vpackuswb %ymm2, %ymm0, %ymm0 @@ -534,10 +534,10 @@ define <32 x i8> @var_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind { ; AVX512DQVL-NEXT: vpsraw $4, %ymm3, %ymm4 ; AVX512DQVL-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm3 ; AVX512DQVL-NEXT: vpsraw $2, %ymm3, %ymm4 -; AVX512DQVL-NEXT: vpaddw %ymm2, %ymm2, %ymm2 -; AVX512DQVL-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm3 +; AVX512DQVL-NEXT: vpaddw %ymm2, %ymm2, %ymm5 +; AVX512DQVL-NEXT: vpblendvb %ymm5, %ymm4, %ymm3, %ymm3 ; AVX512DQVL-NEXT: vpsraw $1, %ymm3, %ymm4 -; AVX512DQVL-NEXT: vpaddw %ymm2, %ymm2, %ymm2 +; AVX512DQVL-NEXT: vpsllw $2, %ymm2, %ymm2 ; AVX512DQVL-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm2 ; AVX512DQVL-NEXT: vpsrlw $8, %ymm2, %ymm2 ; AVX512DQVL-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23] @@ -545,10 +545,10 @@ define <32 x i8> @var_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind { ; AVX512DQVL-NEXT: vpsraw $4, %ymm0, %ymm3 ; AVX512DQVL-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0 ; AVX512DQVL-NEXT: vpsraw $2, %ymm0, %ymm3 -; AVX512DQVL-NEXT: vpaddw %ymm1, %ymm1, %ymm1 -; AVX512DQVL-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0 +; AVX512DQVL-NEXT: vpaddw %ymm1, %ymm1, %ymm4 +; AVX512DQVL-NEXT: vpblendvb %ymm4, %ymm3, %ymm0, %ymm0 ; AVX512DQVL-NEXT: vpsraw $1, %ymm0, %ymm3 -; AVX512DQVL-NEXT: vpaddw %ymm1, %ymm1, %ymm1 +; AVX512DQVL-NEXT: vpsllw $2, %ymm1, %ymm1 ; AVX512DQVL-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0 ; AVX512DQVL-NEXT: vpsrlw $8, %ymm0, %ymm0 ; AVX512DQVL-NEXT: vpackuswb %ymm2, %ymm0, %ymm0 @@ -572,10 +572,10 @@ define <32 x i8> @var_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind { ; X86-AVX1-NEXT: vpsraw $4, %xmm5, %xmm6 ; X86-AVX1-NEXT: vpblendvb %xmm3, %xmm6, %xmm5, %xmm5 ; X86-AVX1-NEXT: vpsraw $2, %xmm5, %xmm6 -; X86-AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3 -; X86-AVX1-NEXT: vpblendvb %xmm3, %xmm6, %xmm5, %xmm5 +; X86-AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm7 +; X86-AVX1-NEXT: vpblendvb %xmm7, %xmm6, %xmm5, %xmm5 ; X86-AVX1-NEXT: vpsraw $1, %xmm5, %xmm6 -; X86-AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3 +; X86-AVX1-NEXT: vpsllw $2, %xmm3, %xmm3 ; X86-AVX1-NEXT: vpblendvb %xmm3, %xmm6, %xmm5, %xmm3 ; X86-AVX1-NEXT: vpsrlw $8, %xmm3, %xmm3 ; X86-AVX1-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] @@ -583,10 +583,10 @@ define <32 x i8> @var_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind { ; X86-AVX1-NEXT: vpsraw $4, %xmm4, %xmm5 ; X86-AVX1-NEXT: vpblendvb %xmm2, %xmm5, %xmm4, %xmm4 ; X86-AVX1-NEXT: vpsraw $2, %xmm4, %xmm5 -; X86-AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm2 -; X86-AVX1-NEXT: vpblendvb %xmm2, %xmm5, %xmm4, %xmm4 
+; X86-AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm6 +; X86-AVX1-NEXT: vpblendvb %xmm6, %xmm5, %xmm4, %xmm4 ; X86-AVX1-NEXT: vpsraw $1, %xmm4, %xmm5 -; X86-AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm2 +; X86-AVX1-NEXT: vpsllw $2, %xmm2, %xmm2 ; X86-AVX1-NEXT: vpblendvb %xmm2, %xmm5, %xmm4, %xmm2 ; X86-AVX1-NEXT: vpsrlw $8, %xmm2, %xmm2 ; X86-AVX1-NEXT: vpackuswb %xmm3, %xmm2, %xmm2 @@ -596,10 +596,10 @@ define <32 x i8> @var_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind { ; X86-AVX1-NEXT: vpsraw $4, %xmm4, %xmm5 ; X86-AVX1-NEXT: vpblendvb %xmm3, %xmm5, %xmm4, %xmm4 ; X86-AVX1-NEXT: vpsraw $2, %xmm4, %xmm5 -; X86-AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3 -; X86-AVX1-NEXT: vpblendvb %xmm3, %xmm5, %xmm4, %xmm4 +; X86-AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm6 +; X86-AVX1-NEXT: vpblendvb %xmm6, %xmm5, %xmm4, %xmm4 ; X86-AVX1-NEXT: vpsraw $1, %xmm4, %xmm5 -; X86-AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3 +; X86-AVX1-NEXT: vpsllw $2, %xmm3, %xmm3 ; X86-AVX1-NEXT: vpblendvb %xmm3, %xmm5, %xmm4, %xmm3 ; X86-AVX1-NEXT: vpsrlw $8, %xmm3, %xmm3 ; X86-AVX1-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] @@ -607,10 +607,10 @@ define <32 x i8> @var_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind { ; X86-AVX1-NEXT: vpsraw $4, %xmm0, %xmm4 ; X86-AVX1-NEXT: vpblendvb %xmm1, %xmm4, %xmm0, %xmm0 ; X86-AVX1-NEXT: vpsraw $2, %xmm0, %xmm4 -; X86-AVX1-NEXT: vpaddw %xmm1, %xmm1, %xmm1 -; X86-AVX1-NEXT: vpblendvb %xmm1, %xmm4, %xmm0, %xmm0 +; X86-AVX1-NEXT: vpaddw %xmm1, %xmm1, %xmm5 +; X86-AVX1-NEXT: vpblendvb %xmm5, %xmm4, %xmm0, %xmm0 ; X86-AVX1-NEXT: vpsraw $1, %xmm0, %xmm4 -; X86-AVX1-NEXT: vpaddw %xmm1, %xmm1, %xmm1 +; X86-AVX1-NEXT: vpsllw $2, %xmm1, %xmm1 ; X86-AVX1-NEXT: vpblendvb %xmm1, %xmm4, %xmm0, %xmm0 ; X86-AVX1-NEXT: vpsrlw $8, %xmm0, %xmm0 ; X86-AVX1-NEXT: vpackuswb %xmm3, %xmm0, %xmm0 @@ -625,10 +625,10 @@ define <32 x i8> @var_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind { ; X86-AVX2-NEXT: vpsraw $4, %ymm3, %ymm4 ; X86-AVX2-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm3 ; X86-AVX2-NEXT: vpsraw $2, %ymm3, %ymm4 -; X86-AVX2-NEXT: vpaddw %ymm2, %ymm2, %ymm2 -; X86-AVX2-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm3 +; X86-AVX2-NEXT: vpaddw %ymm2, %ymm2, %ymm5 +; X86-AVX2-NEXT: vpblendvb %ymm5, %ymm4, %ymm3, %ymm3 ; X86-AVX2-NEXT: vpsraw $1, %ymm3, %ymm4 -; X86-AVX2-NEXT: vpaddw %ymm2, %ymm2, %ymm2 +; X86-AVX2-NEXT: vpsllw $2, %ymm2, %ymm2 ; X86-AVX2-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm2 ; X86-AVX2-NEXT: vpsrlw $8, %ymm2, %ymm2 ; X86-AVX2-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23] @@ -636,10 +636,10 @@ define <32 x i8> @var_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind { ; X86-AVX2-NEXT: vpsraw $4, %ymm0, %ymm3 ; X86-AVX2-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0 ; X86-AVX2-NEXT: vpsraw $2, %ymm0, %ymm3 -; X86-AVX2-NEXT: vpaddw %ymm1, %ymm1, %ymm1 -; X86-AVX2-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0 +; X86-AVX2-NEXT: vpaddw %ymm1, %ymm1, %ymm4 +; X86-AVX2-NEXT: vpblendvb %ymm4, %ymm3, %ymm0, %ymm0 ; X86-AVX2-NEXT: vpsraw $1, %ymm0, %ymm3 -; X86-AVX2-NEXT: vpaddw %ymm1, %ymm1, %ymm1 +; X86-AVX2-NEXT: vpsllw $2, %ymm1, %ymm1 ; X86-AVX2-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0 ; X86-AVX2-NEXT: vpsrlw $8, %ymm0, %ymm0 ; X86-AVX2-NEXT: vpackuswb %ymm2, %ymm0, %ymm0 diff --git a/llvm/test/CodeGen/X86/vector-shift-ashr-512.ll b/llvm/test/CodeGen/X86/vector-shift-ashr-512.ll index ea0745b..0fb0420 100644 --- a/llvm/test/CodeGen/X86/vector-shift-ashr-512.ll +++ b/llvm/test/CodeGen/X86/vector-shift-ashr-512.ll @@ -59,10 +59,10 
@@ define <64 x i8> @var_shift_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind { ; AVX512DQ-NEXT: vpsraw $4, %ymm5, %ymm6 ; AVX512DQ-NEXT: vpblendvb %ymm3, %ymm6, %ymm5, %ymm5 ; AVX512DQ-NEXT: vpsraw $2, %ymm5, %ymm6 -; AVX512DQ-NEXT: vpaddw %ymm3, %ymm3, %ymm3 -; AVX512DQ-NEXT: vpblendvb %ymm3, %ymm6, %ymm5, %ymm5 +; AVX512DQ-NEXT: vpaddw %ymm3, %ymm3, %ymm7 +; AVX512DQ-NEXT: vpblendvb %ymm7, %ymm6, %ymm5, %ymm5 ; AVX512DQ-NEXT: vpsraw $1, %ymm5, %ymm6 -; AVX512DQ-NEXT: vpaddw %ymm3, %ymm3, %ymm3 +; AVX512DQ-NEXT: vpsllw $2, %ymm3, %ymm3 ; AVX512DQ-NEXT: vpblendvb %ymm3, %ymm6, %ymm5, %ymm3 ; AVX512DQ-NEXT: vpsrlw $8, %ymm3, %ymm3 ; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} ymm2 = ymm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23] @@ -70,10 +70,10 @@ define <64 x i8> @var_shift_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind { ; AVX512DQ-NEXT: vpsraw $4, %ymm4, %ymm5 ; AVX512DQ-NEXT: vpblendvb %ymm2, %ymm5, %ymm4, %ymm4 ; AVX512DQ-NEXT: vpsraw $2, %ymm4, %ymm5 -; AVX512DQ-NEXT: vpaddw %ymm2, %ymm2, %ymm2 -; AVX512DQ-NEXT: vpblendvb %ymm2, %ymm5, %ymm4, %ymm4 +; AVX512DQ-NEXT: vpaddw %ymm2, %ymm2, %ymm6 +; AVX512DQ-NEXT: vpblendvb %ymm6, %ymm5, %ymm4, %ymm4 ; AVX512DQ-NEXT: vpsraw $1, %ymm4, %ymm5 -; AVX512DQ-NEXT: vpaddw %ymm2, %ymm2, %ymm2 +; AVX512DQ-NEXT: vpsllw $2, %ymm2, %ymm2 ; AVX512DQ-NEXT: vpblendvb %ymm2, %ymm5, %ymm4, %ymm2 ; AVX512DQ-NEXT: vpsrlw $8, %ymm2, %ymm2 ; AVX512DQ-NEXT: vpackuswb %ymm3, %ymm2, %ymm2 @@ -83,10 +83,10 @@ define <64 x i8> @var_shift_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind { ; AVX512DQ-NEXT: vpsraw $4, %ymm4, %ymm5 ; AVX512DQ-NEXT: vpblendvb %ymm3, %ymm5, %ymm4, %ymm4 ; AVX512DQ-NEXT: vpsraw $2, %ymm4, %ymm5 -; AVX512DQ-NEXT: vpaddw %ymm3, %ymm3, %ymm3 -; AVX512DQ-NEXT: vpblendvb %ymm3, %ymm5, %ymm4, %ymm4 +; AVX512DQ-NEXT: vpaddw %ymm3, %ymm3, %ymm6 +; AVX512DQ-NEXT: vpblendvb %ymm6, %ymm5, %ymm4, %ymm4 ; AVX512DQ-NEXT: vpsraw $1, %ymm4, %ymm5 -; AVX512DQ-NEXT: vpaddw %ymm3, %ymm3, %ymm3 +; AVX512DQ-NEXT: vpsllw $2, %ymm3, %ymm3 ; AVX512DQ-NEXT: vpblendvb %ymm3, %ymm5, %ymm4, %ymm3 ; AVX512DQ-NEXT: vpsrlw $8, %ymm3, %ymm3 ; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23] @@ -94,10 +94,10 @@ define <64 x i8> @var_shift_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind { ; AVX512DQ-NEXT: vpsraw $4, %ymm0, %ymm4 ; AVX512DQ-NEXT: vpblendvb %ymm1, %ymm4, %ymm0, %ymm0 ; AVX512DQ-NEXT: vpsraw $2, %ymm0, %ymm4 -; AVX512DQ-NEXT: vpaddw %ymm1, %ymm1, %ymm1 -; AVX512DQ-NEXT: vpblendvb %ymm1, %ymm4, %ymm0, %ymm0 +; AVX512DQ-NEXT: vpaddw %ymm1, %ymm1, %ymm5 +; AVX512DQ-NEXT: vpblendvb %ymm5, %ymm4, %ymm0, %ymm0 ; AVX512DQ-NEXT: vpsraw $1, %ymm0, %ymm4 -; AVX512DQ-NEXT: vpaddw %ymm1, %ymm1, %ymm1 +; AVX512DQ-NEXT: vpsllw $2, %ymm1, %ymm1 ; AVX512DQ-NEXT: vpblendvb %ymm1, %ymm4, %ymm0, %ymm0 ; AVX512DQ-NEXT: vpsrlw $8, %ymm0, %ymm0 ; AVX512DQ-NEXT: vpackuswb %ymm3, %ymm0, %ymm0 @@ -113,11 +113,11 @@ define <64 x i8> @var_shift_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind { ; AVX512BW-NEXT: vpmovb2m %zmm4, %k1 ; AVX512BW-NEXT: vmovdqu8 %zmm3, %zmm2 {%k1} ; AVX512BW-NEXT: vpsraw $2, %zmm2, %zmm3 -; AVX512BW-NEXT: vpaddw %zmm4, %zmm4, %zmm4 -; AVX512BW-NEXT: vpmovb2m %zmm4, %k1 +; AVX512BW-NEXT: vpaddw %zmm4, %zmm4, %zmm5 +; AVX512BW-NEXT: vpmovb2m %zmm5, %k1 ; AVX512BW-NEXT: vmovdqu8 %zmm3, %zmm2 {%k1} ; AVX512BW-NEXT: vpsraw $1, %zmm2, %zmm3 -; AVX512BW-NEXT: vpaddw %zmm4, %zmm4, %zmm4 +; AVX512BW-NEXT: vpsllw $2, %zmm4, %zmm4 ; AVX512BW-NEXT: vpmovb2m %zmm4, %k1 ; 
AVX512BW-NEXT: vmovdqu8 %zmm3, %zmm2 {%k1} ; AVX512BW-NEXT: vpsrlw $8, %zmm2, %zmm2 @@ -127,11 +127,11 @@ define <64 x i8> @var_shift_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind { ; AVX512BW-NEXT: vpmovb2m %zmm1, %k1 ; AVX512BW-NEXT: vmovdqu8 %zmm3, %zmm0 {%k1} ; AVX512BW-NEXT: vpsraw $2, %zmm0, %zmm3 -; AVX512BW-NEXT: vpaddw %zmm1, %zmm1, %zmm1 -; AVX512BW-NEXT: vpmovb2m %zmm1, %k1 +; AVX512BW-NEXT: vpaddw %zmm1, %zmm1, %zmm4 +; AVX512BW-NEXT: vpmovb2m %zmm4, %k1 ; AVX512BW-NEXT: vmovdqu8 %zmm3, %zmm0 {%k1} ; AVX512BW-NEXT: vpsraw $1, %zmm0, %zmm3 -; AVX512BW-NEXT: vpaddw %zmm1, %zmm1, %zmm1 +; AVX512BW-NEXT: vpsllw $2, %zmm1, %zmm1 ; AVX512BW-NEXT: vpmovb2m %zmm1, %k1 ; AVX512BW-NEXT: vmovdqu8 %zmm3, %zmm0 {%k1} ; AVX512BW-NEXT: vpsrlw $8, %zmm0, %zmm0 diff --git a/llvm/test/CodeGen/X86/vector-shift-ashr-sub128.ll b/llvm/test/CodeGen/X86/vector-shift-ashr-sub128.ll index f7de8d4..c5d3297 100644 --- a/llvm/test/CodeGen/X86/vector-shift-ashr-sub128.ll +++ b/llvm/test/CodeGen/X86/vector-shift-ashr-sub128.ll @@ -196,14 +196,14 @@ define <4 x i16> @var_shift_v4i16(<4 x i16> %a, <4 x i16> %b) nounwind { ; AVX1-NEXT: vpaddw %xmm1, %xmm1, %xmm2 ; AVX1-NEXT: vpsraw $8, %xmm0, %xmm3 ; AVX1-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0 -; AVX1-NEXT: vpsraw $4, %xmm0, %xmm1 -; AVX1-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0 -; AVX1-NEXT: vpsraw $2, %xmm0, %xmm1 -; AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm2 -; AVX1-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0 -; AVX1-NEXT: vpsraw $1, %xmm0, %xmm1 -; AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm2 -; AVX1-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0 +; AVX1-NEXT: vpsraw $4, %xmm0, %xmm3 +; AVX1-NEXT: vpblendvb %xmm2, %xmm3, %xmm0, %xmm0 +; AVX1-NEXT: vpsraw $2, %xmm0, %xmm2 +; AVX1-NEXT: vpsllw $2, %xmm1, %xmm3 +; AVX1-NEXT: vpblendvb %xmm3, %xmm2, %xmm0, %xmm0 +; AVX1-NEXT: vpsraw $1, %xmm0, %xmm2 +; AVX1-NEXT: vpsllw $3, %xmm1, %xmm1 +; AVX1-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0 ; AVX1-NEXT: retq ; ; AVX2-LABEL: var_shift_v4i16: @@ -367,14 +367,14 @@ define <2 x i16> @var_shift_v2i16(<2 x i16> %a, <2 x i16> %b) nounwind { ; AVX1-NEXT: vpaddw %xmm1, %xmm1, %xmm2 ; AVX1-NEXT: vpsraw $8, %xmm0, %xmm3 ; AVX1-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0 -; AVX1-NEXT: vpsraw $4, %xmm0, %xmm1 -; AVX1-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0 -; AVX1-NEXT: vpsraw $2, %xmm0, %xmm1 -; AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm2 -; AVX1-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0 -; AVX1-NEXT: vpsraw $1, %xmm0, %xmm1 -; AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm2 -; AVX1-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0 +; AVX1-NEXT: vpsraw $4, %xmm0, %xmm3 +; AVX1-NEXT: vpblendvb %xmm2, %xmm3, %xmm0, %xmm0 +; AVX1-NEXT: vpsraw $2, %xmm0, %xmm2 +; AVX1-NEXT: vpsllw $2, %xmm1, %xmm3 +; AVX1-NEXT: vpblendvb %xmm3, %xmm2, %xmm0, %xmm0 +; AVX1-NEXT: vpsraw $1, %xmm0, %xmm2 +; AVX1-NEXT: vpsllw $3, %xmm1, %xmm1 +; AVX1-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0 ; AVX1-NEXT: retq ; ; AVX2-LABEL: var_shift_v2i16: @@ -568,10 +568,10 @@ define <8 x i8> @var_shift_v8i8(<8 x i8> %a, <8 x i8> %b) nounwind { ; AVX-NEXT: vpsraw $4, %xmm3, %xmm4 ; AVX-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm3 ; AVX-NEXT: vpsraw $2, %xmm3, %xmm4 -; AVX-NEXT: vpaddw %xmm2, %xmm2, %xmm2 -; AVX-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm3 +; AVX-NEXT: vpaddw %xmm2, %xmm2, %xmm5 +; AVX-NEXT: vpblendvb %xmm5, %xmm4, %xmm3, %xmm3 ; AVX-NEXT: vpsraw $1, %xmm3, %xmm4 -; AVX-NEXT: vpaddw %xmm2, %xmm2, %xmm2 +; AVX-NEXT: vpsllw $2, %xmm2, %xmm2 ; AVX-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm2 ; AVX-NEXT: vpsrlw $8, %xmm2, %xmm2 ; AVX-NEXT: 
vpunpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] @@ -579,10 +579,10 @@ define <8 x i8> @var_shift_v8i8(<8 x i8> %a, <8 x i8> %b) nounwind { ; AVX-NEXT: vpsraw $4, %xmm0, %xmm3 ; AVX-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0 ; AVX-NEXT: vpsraw $2, %xmm0, %xmm3 -; AVX-NEXT: vpaddw %xmm1, %xmm1, %xmm1 -; AVX-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0 +; AVX-NEXT: vpaddw %xmm1, %xmm1, %xmm4 +; AVX-NEXT: vpblendvb %xmm4, %xmm3, %xmm0, %xmm0 ; AVX-NEXT: vpsraw $1, %xmm0, %xmm3 -; AVX-NEXT: vpaddw %xmm1, %xmm1, %xmm1 +; AVX-NEXT: vpsllw $2, %xmm1, %xmm1 ; AVX-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0 ; AVX-NEXT: vpsrlw $8, %xmm0, %xmm0 ; AVX-NEXT: vpackuswb %xmm2, %xmm0, %xmm0 @@ -796,10 +796,10 @@ define <4 x i8> @var_shift_v4i8(<4 x i8> %a, <4 x i8> %b) nounwind { ; AVX-NEXT: vpsraw $4, %xmm3, %xmm4 ; AVX-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm3 ; AVX-NEXT: vpsraw $2, %xmm3, %xmm4 -; AVX-NEXT: vpaddw %xmm2, %xmm2, %xmm2 -; AVX-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm3 +; AVX-NEXT: vpaddw %xmm2, %xmm2, %xmm5 +; AVX-NEXT: vpblendvb %xmm5, %xmm4, %xmm3, %xmm3 ; AVX-NEXT: vpsraw $1, %xmm3, %xmm4 -; AVX-NEXT: vpaddw %xmm2, %xmm2, %xmm2 +; AVX-NEXT: vpsllw $2, %xmm2, %xmm2 ; AVX-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm2 ; AVX-NEXT: vpsrlw $8, %xmm2, %xmm2 ; AVX-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] @@ -807,10 +807,10 @@ define <4 x i8> @var_shift_v4i8(<4 x i8> %a, <4 x i8> %b) nounwind { ; AVX-NEXT: vpsraw $4, %xmm0, %xmm3 ; AVX-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0 ; AVX-NEXT: vpsraw $2, %xmm0, %xmm3 -; AVX-NEXT: vpaddw %xmm1, %xmm1, %xmm1 -; AVX-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0 +; AVX-NEXT: vpaddw %xmm1, %xmm1, %xmm4 +; AVX-NEXT: vpblendvb %xmm4, %xmm3, %xmm0, %xmm0 ; AVX-NEXT: vpsraw $1, %xmm0, %xmm3 -; AVX-NEXT: vpaddw %xmm1, %xmm1, %xmm1 +; AVX-NEXT: vpsllw $2, %xmm1, %xmm1 ; AVX-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0 ; AVX-NEXT: vpsrlw $8, %xmm0, %xmm0 ; AVX-NEXT: vpackuswb %xmm2, %xmm0, %xmm0 @@ -1024,10 +1024,10 @@ define <2 x i8> @var_shift_v2i8(<2 x i8> %a, <2 x i8> %b) nounwind { ; AVX-NEXT: vpsraw $4, %xmm3, %xmm4 ; AVX-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm3 ; AVX-NEXT: vpsraw $2, %xmm3, %xmm4 -; AVX-NEXT: vpaddw %xmm2, %xmm2, %xmm2 -; AVX-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm3 +; AVX-NEXT: vpaddw %xmm2, %xmm2, %xmm5 +; AVX-NEXT: vpblendvb %xmm5, %xmm4, %xmm3, %xmm3 ; AVX-NEXT: vpsraw $1, %xmm3, %xmm4 -; AVX-NEXT: vpaddw %xmm2, %xmm2, %xmm2 +; AVX-NEXT: vpsllw $2, %xmm2, %xmm2 ; AVX-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm2 ; AVX-NEXT: vpsrlw $8, %xmm2, %xmm2 ; AVX-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] @@ -1035,10 +1035,10 @@ define <2 x i8> @var_shift_v2i8(<2 x i8> %a, <2 x i8> %b) nounwind { ; AVX-NEXT: vpsraw $4, %xmm0, %xmm3 ; AVX-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0 ; AVX-NEXT: vpsraw $2, %xmm0, %xmm3 -; AVX-NEXT: vpaddw %xmm1, %xmm1, %xmm1 -; AVX-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0 +; AVX-NEXT: vpaddw %xmm1, %xmm1, %xmm4 +; AVX-NEXT: vpblendvb %xmm4, %xmm3, %xmm0, %xmm0 ; AVX-NEXT: vpsraw $1, %xmm0, %xmm3 -; AVX-NEXT: vpaddw %xmm1, %xmm1, %xmm1 +; AVX-NEXT: vpsllw $2, %xmm1, %xmm1 ; AVX-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0 ; AVX-NEXT: vpsrlw $8, %xmm0, %xmm0 ; AVX-NEXT: vpackuswb %xmm2, %xmm0, %xmm0 diff --git a/llvm/test/CodeGen/X86/vector-shift-lshr-128.ll b/llvm/test/CodeGen/X86/vector-shift-lshr-128.ll index 1d1697a..8cb2c7b 100644 --- a/llvm/test/CodeGen/X86/vector-shift-lshr-128.ll +++ b/llvm/test/CodeGen/X86/vector-shift-lshr-128.ll @@ -262,14 
+262,14 @@ define <8 x i16> @var_shift_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind { ; AVX1-NEXT: vpaddw %xmm1, %xmm1, %xmm2 ; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm3 ; AVX1-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0 -; AVX1-NEXT: vpsrlw $4, %xmm0, %xmm1 -; AVX1-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0 -; AVX1-NEXT: vpsrlw $2, %xmm0, %xmm1 -; AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm2 -; AVX1-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0 -; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm1 -; AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm2 -; AVX1-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0 +; AVX1-NEXT: vpsrlw $4, %xmm0, %xmm3 +; AVX1-NEXT: vpblendvb %xmm2, %xmm3, %xmm0, %xmm0 +; AVX1-NEXT: vpsrlw $2, %xmm0, %xmm2 +; AVX1-NEXT: vpsllw $2, %xmm1, %xmm3 +; AVX1-NEXT: vpblendvb %xmm3, %xmm2, %xmm0, %xmm0 +; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm2 +; AVX1-NEXT: vpsllw $3, %xmm1, %xmm1 +; AVX1-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0 ; AVX1-NEXT: retq ; ; AVX2-LABEL: var_shift_v8i16: diff --git a/llvm/test/CodeGen/X86/vector-shift-lshr-256.ll b/llvm/test/CodeGen/X86/vector-shift-lshr-256.ll index 3a4bb22..606adb4 100644 --- a/llvm/test/CodeGen/X86/vector-shift-lshr-256.ll +++ b/llvm/test/CodeGen/X86/vector-shift-lshr-256.ll @@ -198,29 +198,29 @@ define <16 x i16> @var_shift_v16i16(<16 x i16> %a, <16 x i16> %b) nounwind { ; AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm3 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4 ; AVX1-NEXT: vpsrlw $8, %xmm4, %xmm5 -; AVX1-NEXT: vpblendvb %xmm2, %xmm5, %xmm4, %xmm2 -; AVX1-NEXT: vpsrlw $4, %xmm2, %xmm4 -; AVX1-NEXT: vpblendvb %xmm3, %xmm4, %xmm2, %xmm2 -; AVX1-NEXT: vpsrlw $2, %xmm2, %xmm4 -; AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3 -; AVX1-NEXT: vpblendvb %xmm3, %xmm4, %xmm2, %xmm2 -; AVX1-NEXT: vpsrlw $1, %xmm2, %xmm4 -; AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3 -; AVX1-NEXT: vpblendvb %xmm3, %xmm4, %xmm2, %xmm2 +; AVX1-NEXT: vpblendvb %xmm2, %xmm5, %xmm4, %xmm4 +; AVX1-NEXT: vpsrlw $4, %xmm4, %xmm5 +; AVX1-NEXT: vpblendvb %xmm3, %xmm5, %xmm4, %xmm3 +; AVX1-NEXT: vpsrlw $2, %xmm3, %xmm4 +; AVX1-NEXT: vpsllw $2, %xmm2, %xmm5 +; AVX1-NEXT: vpblendvb %xmm5, %xmm4, %xmm3, %xmm3 +; AVX1-NEXT: vpsrlw $1, %xmm3, %xmm4 +; AVX1-NEXT: vpsllw $3, %xmm2, %xmm2 +; AVX1-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm2 ; AVX1-NEXT: vpsllw $12, %xmm1, %xmm3 ; AVX1-NEXT: vpsllw $4, %xmm1, %xmm1 ; AVX1-NEXT: vpor %xmm3, %xmm1, %xmm1 ; AVX1-NEXT: vpaddw %xmm1, %xmm1, %xmm3 ; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm4 ; AVX1-NEXT: vpblendvb %xmm1, %xmm4, %xmm0, %xmm0 -; AVX1-NEXT: vpsrlw $4, %xmm0, %xmm1 -; AVX1-NEXT: vpblendvb %xmm3, %xmm1, %xmm0, %xmm0 -; AVX1-NEXT: vpsrlw $2, %xmm0, %xmm1 -; AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3 -; AVX1-NEXT: vpblendvb %xmm3, %xmm1, %xmm0, %xmm0 -; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm1 -; AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3 -; AVX1-NEXT: vpblendvb %xmm3, %xmm1, %xmm0, %xmm0 +; AVX1-NEXT: vpsrlw $4, %xmm0, %xmm4 +; AVX1-NEXT: vpblendvb %xmm3, %xmm4, %xmm0, %xmm0 +; AVX1-NEXT: vpsrlw $2, %xmm0, %xmm3 +; AVX1-NEXT: vpsllw $2, %xmm1, %xmm4 +; AVX1-NEXT: vpblendvb %xmm4, %xmm3, %xmm0, %xmm0 +; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm3 +; AVX1-NEXT: vpsllw $3, %xmm1, %xmm1 +; AVX1-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0 ; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0 ; AVX1-NEXT: retq ; @@ -300,29 +300,29 @@ define <16 x i16> @var_shift_v16i16(<16 x i16> %a, <16 x i16> %b) nounwind { ; X86-AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm3 ; X86-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4 ; X86-AVX1-NEXT: vpsrlw $8, %xmm4, %xmm5 -; X86-AVX1-NEXT: vpblendvb %xmm2, %xmm5, %xmm4, %xmm2 -; X86-AVX1-NEXT: vpsrlw $4, %xmm2, %xmm4 -; 
X86-AVX1-NEXT: vpblendvb %xmm3, %xmm4, %xmm2, %xmm2 -; X86-AVX1-NEXT: vpsrlw $2, %xmm2, %xmm4 -; X86-AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3 -; X86-AVX1-NEXT: vpblendvb %xmm3, %xmm4, %xmm2, %xmm2 -; X86-AVX1-NEXT: vpsrlw $1, %xmm2, %xmm4 -; X86-AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3 -; X86-AVX1-NEXT: vpblendvb %xmm3, %xmm4, %xmm2, %xmm2 +; X86-AVX1-NEXT: vpblendvb %xmm2, %xmm5, %xmm4, %xmm4 +; X86-AVX1-NEXT: vpsrlw $4, %xmm4, %xmm5 +; X86-AVX1-NEXT: vpblendvb %xmm3, %xmm5, %xmm4, %xmm3 +; X86-AVX1-NEXT: vpsrlw $2, %xmm3, %xmm4 +; X86-AVX1-NEXT: vpsllw $2, %xmm2, %xmm5 +; X86-AVX1-NEXT: vpblendvb %xmm5, %xmm4, %xmm3, %xmm3 +; X86-AVX1-NEXT: vpsrlw $1, %xmm3, %xmm4 +; X86-AVX1-NEXT: vpsllw $3, %xmm2, %xmm2 +; X86-AVX1-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm2 ; X86-AVX1-NEXT: vpsllw $12, %xmm1, %xmm3 ; X86-AVX1-NEXT: vpsllw $4, %xmm1, %xmm1 ; X86-AVX1-NEXT: vpor %xmm3, %xmm1, %xmm1 ; X86-AVX1-NEXT: vpaddw %xmm1, %xmm1, %xmm3 ; X86-AVX1-NEXT: vpsrlw $8, %xmm0, %xmm4 ; X86-AVX1-NEXT: vpblendvb %xmm1, %xmm4, %xmm0, %xmm0 -; X86-AVX1-NEXT: vpsrlw $4, %xmm0, %xmm1 -; X86-AVX1-NEXT: vpblendvb %xmm3, %xmm1, %xmm0, %xmm0 -; X86-AVX1-NEXT: vpsrlw $2, %xmm0, %xmm1 -; X86-AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3 -; X86-AVX1-NEXT: vpblendvb %xmm3, %xmm1, %xmm0, %xmm0 -; X86-AVX1-NEXT: vpsrlw $1, %xmm0, %xmm1 -; X86-AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3 -; X86-AVX1-NEXT: vpblendvb %xmm3, %xmm1, %xmm0, %xmm0 +; X86-AVX1-NEXT: vpsrlw $4, %xmm0, %xmm4 +; X86-AVX1-NEXT: vpblendvb %xmm3, %xmm4, %xmm0, %xmm0 +; X86-AVX1-NEXT: vpsrlw $2, %xmm0, %xmm3 +; X86-AVX1-NEXT: vpsllw $2, %xmm1, %xmm4 +; X86-AVX1-NEXT: vpblendvb %xmm4, %xmm3, %xmm0, %xmm0 +; X86-AVX1-NEXT: vpsrlw $1, %xmm0, %xmm3 +; X86-AVX1-NEXT: vpsllw $3, %xmm1, %xmm1 +; X86-AVX1-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0 ; X86-AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0 ; X86-AVX1-NEXT: retl ; diff --git a/llvm/test/CodeGen/X86/vector-shift-lshr-sub128.ll b/llvm/test/CodeGen/X86/vector-shift-lshr-sub128.ll index 7928111..57874c4 100644 --- a/llvm/test/CodeGen/X86/vector-shift-lshr-sub128.ll +++ b/llvm/test/CodeGen/X86/vector-shift-lshr-sub128.ll @@ -196,14 +196,14 @@ define <4 x i16> @var_shift_v4i16(<4 x i16> %a, <4 x i16> %b) nounwind { ; AVX1-NEXT: vpaddw %xmm1, %xmm1, %xmm2 ; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm3 ; AVX1-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0 -; AVX1-NEXT: vpsrlw $4, %xmm0, %xmm1 -; AVX1-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0 -; AVX1-NEXT: vpsrlw $2, %xmm0, %xmm1 -; AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm2 -; AVX1-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0 -; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm1 -; AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm2 -; AVX1-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0 +; AVX1-NEXT: vpsrlw $4, %xmm0, %xmm3 +; AVX1-NEXT: vpblendvb %xmm2, %xmm3, %xmm0, %xmm0 +; AVX1-NEXT: vpsrlw $2, %xmm0, %xmm2 +; AVX1-NEXT: vpsllw $2, %xmm1, %xmm3 +; AVX1-NEXT: vpblendvb %xmm3, %xmm2, %xmm0, %xmm0 +; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm2 +; AVX1-NEXT: vpsllw $3, %xmm1, %xmm1 +; AVX1-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0 ; AVX1-NEXT: retq ; ; AVX2-LABEL: var_shift_v4i16: @@ -367,14 +367,14 @@ define <2 x i16> @var_shift_v2i16(<2 x i16> %a, <2 x i16> %b) nounwind { ; AVX1-NEXT: vpaddw %xmm1, %xmm1, %xmm2 ; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm3 ; AVX1-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0 -; AVX1-NEXT: vpsrlw $4, %xmm0, %xmm1 -; AVX1-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0 -; AVX1-NEXT: vpsrlw $2, %xmm0, %xmm1 -; AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm2 -; AVX1-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0 -; AVX1-NEXT: vpsrlw $1, 
%xmm0, %xmm1 -; AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm2 -; AVX1-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0 +; AVX1-NEXT: vpsrlw $4, %xmm0, %xmm3 +; AVX1-NEXT: vpblendvb %xmm2, %xmm3, %xmm0, %xmm0 +; AVX1-NEXT: vpsrlw $2, %xmm0, %xmm2 +; AVX1-NEXT: vpsllw $2, %xmm1, %xmm3 +; AVX1-NEXT: vpblendvb %xmm3, %xmm2, %xmm0, %xmm0 +; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm2 +; AVX1-NEXT: vpsllw $3, %xmm1, %xmm1 +; AVX1-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0 ; AVX1-NEXT: retq ; ; AVX2-LABEL: var_shift_v2i16: diff --git a/llvm/test/CodeGen/X86/vector-shuffle-256-v16.ll b/llvm/test/CodeGen/X86/vector-shuffle-256-v16.ll index dbbfaab..be41945 100644 --- a/llvm/test/CodeGen/X86/vector-shuffle-256-v16.ll +++ b/llvm/test/CodeGen/X86/vector-shuffle-256-v16.ll @@ -8079,14 +8079,14 @@ define <16 x i16> @pr43230(<16 x i16> %a, <16 x i16> %b) { ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0 ; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm3 ; AVX1-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0 -; AVX1-NEXT: vpsrlw $4, %xmm0, %xmm1 -; AVX1-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0 -; AVX1-NEXT: vpsrlw $2, %xmm0, %xmm1 -; AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm2 -; AVX1-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0 -; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm1 -; AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm2 -; AVX1-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0 +; AVX1-NEXT: vpsrlw $4, %xmm0, %xmm3 +; AVX1-NEXT: vpblendvb %xmm2, %xmm3, %xmm0, %xmm0 +; AVX1-NEXT: vpsrlw $2, %xmm0, %xmm2 +; AVX1-NEXT: vpsllw $2, %xmm1, %xmm3 +; AVX1-NEXT: vpblendvb %xmm3, %xmm2, %xmm0, %xmm0 +; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm2 +; AVX1-NEXT: vpsllw $3, %xmm1, %xmm1 +; AVX1-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0 ; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0 ; AVX1-NEXT: vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0 ; AVX1-NEXT: retq diff --git a/llvm/test/CodeGen/X86/vector-zext.ll b/llvm/test/CodeGen/X86/vector-zext.ll index bd1a48b..7b0f1c9 100644 --- a/llvm/test/CodeGen/X86/vector-zext.ll +++ b/llvm/test/CodeGen/X86/vector-zext.ll @@ -2555,7 +2555,7 @@ entry: define <4 x i64> @splatshuf_zext_v4i64(<4 x i32> %x) { ; SSE2-LABEL: splatshuf_zext_v4i64: ; SSE2: # %bb.0: -; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1] +; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0] ; SSE2-NEXT: pxor %xmm1, %xmm1 ; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] ; SSE2-NEXT: movdqa %xmm0, %xmm1 @@ -2563,7 +2563,7 @@ define <4 x i64> @splatshuf_zext_v4i64(<4 x i32> %x) { ; ; SSSE3-LABEL: splatshuf_zext_v4i64: ; SSSE3: # %bb.0: -; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1] +; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0] ; SSSE3-NEXT: pxor %xmm1, %xmm1 ; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] ; SSSE3-NEXT: movdqa %xmm0, %xmm1 @@ -2571,7 +2571,7 @@ define <4 x i64> @splatshuf_zext_v4i64(<4 x i32> %x) { ; ; SSE41-LABEL: splatshuf_zext_v4i64: ; SSE41: # %bb.0: -; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1] +; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0] ; SSE41-NEXT: pmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero ; SSE41-NEXT: movdqa %xmm0, %xmm1 ; SSE41-NEXT: retq diff --git a/llvm/test/CodeGen/X86/zero_extend_vector_inreg_of_broadcast.ll b/llvm/test/CodeGen/X86/zero_extend_vector_inreg_of_broadcast.ll index 19a31a6..31ed745 100644 --- a/llvm/test/CodeGen/X86/zero_extend_vector_inreg_of_broadcast.ll +++ b/llvm/test/CodeGen/X86/zero_extend_vector_inreg_of_broadcast.ll @@ -911,7 +911,7 @@ define void @vec128_i32_widen_to_i64_factor2_broadcast_to_v2i64_factor2(ptr %in. 
; SSE2-NEXT: paddb (%rsi), %xmm0 ; SSE2-NEXT: paddb 16(%rsi), %xmm1 ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3] -; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1] +; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0] ; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] ; SSE2-NEXT: paddb (%rdx), %xmm0 ; SSE2-NEXT: movdqa %xmm0, (%rcx) @@ -1898,7 +1898,7 @@ define void @vec256_i32_widen_to_i64_factor2_broadcast_to_v4i64_factor4(ptr %in. ; SSE2-NEXT: paddb (%rsi), %xmm0 ; SSE2-NEXT: paddb 32(%rsi), %xmm1 ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3] -; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1] +; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0] ; SSE2-NEXT: movdqa %xmm0, %xmm3 ; SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1] ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,3,2,3] @@ -4610,7 +4610,7 @@ define void @vec384_i32_widen_to_i64_factor2_broadcast_to_v6i64_factor6(ptr %in. ; SSE2-NEXT: paddb (%rsi), %xmm0 ; SSE2-NEXT: paddb 48(%rsi), %xmm1 ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3] -; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1] +; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0] ; SSE2-NEXT: movdqa %xmm0, %xmm2 ; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1] ; SSE2-NEXT: pxor %xmm1, %xmm1 @@ -6544,7 +6544,7 @@ define void @vec512_i32_widen_to_i64_factor2_broadcast_to_v8i64_factor8(ptr %in. ; SSE2-NEXT: movdqa (%rdi), %xmm0 ; SSE2-NEXT: paddb (%rsi), %xmm0 ; SSE2-NEXT: pxor %xmm1, %xmm1 -; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1] +; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0] ; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] ; SSE2-NEXT: movdqa 16(%rdx), %xmm1 ; SSE2-NEXT: paddb %xmm0, %xmm1 diff --git a/llvm/test/CodeGen/X86/zero_extend_vector_inreg_of_broadcast_from_memory.ll b/llvm/test/CodeGen/X86/zero_extend_vector_inreg_of_broadcast_from_memory.ll index 239472c..5b4cdd2 100644 --- a/llvm/test/CodeGen/X86/zero_extend_vector_inreg_of_broadcast_from_memory.ll +++ b/llvm/test/CodeGen/X86/zero_extend_vector_inreg_of_broadcast_from_memory.ll @@ -769,7 +769,7 @@ define void @vec128_i32_widen_to_i64_factor2_broadcast_to_v2i64_factor2(ptr %in. ; SSE2-LABEL: vec128_i32_widen_to_i64_factor2_broadcast_to_v2i64_factor2: ; SSE2: # %bb.0: ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = mem[1,3,2,3] -; SSE2-NEXT: pshufd {{.*#+}} xmm1 = mem[0,0,1,1] +; SSE2-NEXT: pshufd {{.*#+}} xmm1 = mem[0,0,0,0] ; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] ; SSE2-NEXT: paddb (%rsi), %xmm1 ; SSE2-NEXT: movdqa %xmm1, (%rdx) @@ -1522,7 +1522,7 @@ define void @vec256_i32_widen_to_i64_factor2_broadcast_to_v4i64_factor4(ptr %in. ; SSE2-LABEL: vec256_i32_widen_to_i64_factor2_broadcast_to_v4i64_factor4: ; SSE2: # %bb.0: ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = mem[1,3,2,3] -; SSE2-NEXT: pshufd {{.*#+}} xmm1 = mem[0,0,1,1] +; SSE2-NEXT: pshufd {{.*#+}} xmm1 = mem[0,0,0,0] ; SSE2-NEXT: movdqa %xmm1, %xmm2 ; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1] ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = mem[1,3,2,3] @@ -3660,7 +3660,7 @@ define void @vec384_i32_widen_to_i64_factor2_broadcast_to_v6i64_factor6(ptr %in. 
; SSE2-LABEL: vec384_i32_widen_to_i64_factor2_broadcast_to_v6i64_factor6: ; SSE2: # %bb.0: ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = mem[1,3,2,3] -; SSE2-NEXT: pshufd {{.*#+}} xmm1 = mem[0,0,1,1] +; SSE2-NEXT: pshufd {{.*#+}} xmm1 = mem[0,0,0,0] ; SSE2-NEXT: movdqa %xmm1, %xmm2 ; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1] ; SSE2-NEXT: pxor %xmm0, %xmm0 @@ -5250,7 +5250,7 @@ define void @vec512_i16_widen_to_i256_factor16_broadcast_to_v2i256_factor2(ptr % define void @vec512_i32_widen_to_i64_factor2_broadcast_to_v8i64_factor8(ptr %in.elt.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind { ; SSE2-LABEL: vec512_i32_widen_to_i64_factor2_broadcast_to_v8i64_factor8: ; SSE2: # %bb.0: -; SSE2-NEXT: pshufd {{.*#+}} xmm0 = mem[0,0,1,1] +; SSE2-NEXT: pshufd {{.*#+}} xmm0 = mem[0,0,0,0] ; SSE2-NEXT: pxor %xmm1, %xmm1 ; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] ; SSE2-NEXT: movdqa 16(%rsi), %xmm1 diff --git a/llvm/test/DebugInfo/symbolize-build-id.test b/llvm/test/DebugInfo/symbolize-build-id.test index d63f43f..2620718 100644 --- a/llvm/test/DebugInfo/symbolize-build-id.test +++ b/llvm/test/DebugInfo/symbolize-build-id.test @@ -21,6 +21,7 @@ Sections: Type: SHT_NOTE Flags: [ SHF_ALLOC ] Content: 040000000800000003000000474e5500abb50d82b6bdc861 + AddressAlign: 4 ProgramHeaders: - Type: PT_NOTE Flags: [ PF_R ] diff --git a/llvm/test/MC/AMDGPU/hsa-gfx1250-v4.s b/llvm/test/MC/AMDGPU/hsa-gfx1250-v4.s index 3c69361..80a340c 100644 --- a/llvm/test/MC/AMDGPU/hsa-gfx1250-v4.s +++ b/llvm/test/MC/AMDGPU/hsa-gfx1250-v4.s @@ -178,6 +178,7 @@ max_vgprs: // ASM-NEXT: .amdhsa_next_free_sgpr 32 // ASM-NEXT: .amdhsa_named_barrier_count 3 // ASM-NEXT: .amdhsa_reserve_vcc 0 +// ASM-NEXT: .amdhsa_reserve_xnack_mask 1 // ASM-NEXT: .amdhsa_float_round_mode_32 1 // ASM-NEXT: .amdhsa_float_round_mode_16_64 1 // ASM-NEXT: .amdhsa_float_denorm_mode_32 1 diff --git a/llvm/test/MC/AMDGPU/hsa-gfx1251-v4.s b/llvm/test/MC/AMDGPU/hsa-gfx1251-v4.s index 776006b..642e62d 100644 --- a/llvm/test/MC/AMDGPU/hsa-gfx1251-v4.s +++ b/llvm/test/MC/AMDGPU/hsa-gfx1251-v4.s @@ -178,6 +178,7 @@ max_vgprs: // ASM-NEXT: .amdhsa_next_free_sgpr 32 // ASM-NEXT: .amdhsa_named_barrier_count 3 // ASM-NEXT: .amdhsa_reserve_vcc 0 +// ASM-NEXT: .amdhsa_reserve_xnack_mask 1 // ASM-NEXT: .amdhsa_float_round_mode_32 1 // ASM-NEXT: .amdhsa_float_round_mode_16_64 1 // ASM-NEXT: .amdhsa_float_denorm_mode_32 1 diff --git a/llvm/test/TableGen/RegisterClassCopyCost.td b/llvm/test/TableGen/RegisterClassCopyCost.td new file mode 100644 index 0000000..fc65fdb --- /dev/null +++ b/llvm/test/TableGen/RegisterClassCopyCost.td @@ -0,0 +1,31 @@ +// RUN: llvm-tblgen --gen-register-info -I %p/../../include %s 2>&1 | FileCheck %s +// RUN: not llvm-tblgen --gen-register-info -I %p/../../include -DERROR %s 2>&1 | FileCheck -check-prefix=ERROR %s + +// Check that there is no assertion when specifying unsupported +// CopyCost values on register classes. Check that negative CopyCost +// values are saturated to 255. 
+ +include "llvm/Target/Target.td" + +// CHECK: extern const MCRegisterClass MyTargetMCRegisterClasses[] = { +// CHECK-NEXT: { GPR32, GPR32Bits, 0, 2, sizeof(GPR32Bits), MyTarget::GPR32RegClassID, 32, 1, true, false }, +// CHECK-NEXT: { SPECIAL_CLASS, SPECIAL_CLASSBits, 6, 1, sizeof(SPECIAL_CLASSBits), MyTarget::SPECIAL_CLASSRegClassID, 32, 255, true, false }, +// CHECK-NEXT: }; + +def MyTargetISA : InstrInfo; +def MyTarget : Target { let InstructionSet = MyTargetISA; } + +def R0 : Register<"r0"> { let Namespace = "MyTarget"; } +def R1 : Register<"r1"> { let Namespace = "MyTarget"; } +def SPECIAL : Register<"special"> { let Namespace = "MyTarget"; } + +// ERROR: :[[@LINE+1]]:5: error: 'CopyCost' must be an 8-bit value +def GPR32 : RegisterClass<"MyTarget", [i32], 32, (add R0, R1)> { +#ifdef ERROR + let CopyCost = 256; +#endif +} + +def SPECIAL_CLASS : RegisterClass<"MyTarget", [i32], 32, (add SPECIAL)> { + let CopyCost = -1; +} diff --git a/llvm/test/Transforms/GVN/masked-load-store-no-mem-dep.ll b/llvm/test/Transforms/GVN/masked-load-store-no-mem-dep.ll new file mode 100644 index 0000000..512ea37 --- /dev/null +++ b/llvm/test/Transforms/GVN/masked-load-store-no-mem-dep.ll @@ -0,0 +1,34 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py +; RUN: opt -passes=gvn -S -enable-gvn-memdep=true < %s | FileCheck %s +; RUN: opt -passes=gvn -S -enable-gvn-memdep=false < %s | FileCheck %s --check-prefix=MEMDEPFALSE + +define <4 x float> @forward_binop_with_sel(ptr %0, ptr %1, i32 %a, i32 %b, <4 x float> %passthrough) { +; CHECK-LABEL: @forward_binop_with_sel( +; CHECK-NEXT: [[MASK:%.*]] = tail call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 [[A:%.*]], i32 [[B:%.*]]) +; CHECK-NEXT: [[LOAD_0_0:%.*]] = call <4 x float> @llvm.masked.load.v4f32.p0(ptr [[TMP0:%.*]], i32 1, <4 x i1> [[MASK]], <4 x float> zeroinitializer) +; CHECK-NEXT: [[GEP_0_16:%.*]] = getelementptr i8, ptr [[TMP0]], i32 16 +; CHECK-NEXT: [[LOAD_0_16:%.*]] = call <4 x float> @llvm.masked.load.v4f32.p0(ptr [[GEP_0_16]], i32 1, <4 x i1> [[MASK]], <4 x float> zeroinitializer) +; CHECK-NEXT: [[FMUL:%.*]] = fmul <4 x float> [[LOAD_0_0]], [[LOAD_0_16]] +; CHECK-NEXT: call void @llvm.masked.store.v4f32.p0(<4 x float> [[FMUL]], ptr [[TMP1:%.*]], i32 1, <4 x i1> [[MASK]]) +; CHECK-NEXT: [[TMP3:%.*]] = select <4 x i1> [[MASK]], <4 x float> [[FMUL]], <4 x float> [[PASSTHROUGH:%.*]] +; CHECK-NEXT: ret <4 x float> [[TMP3]] +; +; MEMDEPFALSE-LABEL: @forward_binop_with_sel( +; MEMDEPFALSE-NEXT: [[MASK:%.*]] = tail call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 [[A:%.*]], i32 [[B:%.*]]) +; MEMDEPFALSE-NEXT: [[LOAD_0_0:%.*]] = call <4 x float> @llvm.masked.load.v4f32.p0(ptr [[TMP0:%.*]], i32 1, <4 x i1> [[MASK]], <4 x float> zeroinitializer) +; MEMDEPFALSE-NEXT: [[GEP_0_16:%.*]] = getelementptr i8, ptr [[TMP0]], i32 16 +; MEMDEPFALSE-NEXT: [[LOAD_0_16:%.*]] = call <4 x float> @llvm.masked.load.v4f32.p0(ptr [[GEP_0_16]], i32 1, <4 x i1> [[MASK]], <4 x float> zeroinitializer) +; MEMDEPFALSE-NEXT: [[FMUL:%.*]] = fmul <4 x float> [[LOAD_0_0]], [[LOAD_0_16]] +; MEMDEPFALSE-NEXT: call void @llvm.masked.store.v4f32.p0(<4 x float> [[FMUL]], ptr [[TMP1:%.*]], i32 1, <4 x i1> [[MASK]]) +; MEMDEPFALSE-NEXT: [[LOAD_1_0:%.*]] = call <4 x float> @llvm.masked.load.v4f32.p0(ptr [[TMP1]], i32 1, <4 x i1> [[MASK]], <4 x float> [[PASSTHROUGH:%.*]]) +; MEMDEPFALSE-NEXT: ret <4 x float> [[LOAD_1_0]] +; + %mask = tail call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %a, i32 %b) + %load.0.0 = call <4 x float> 
@llvm.masked.load.v4f32.p0(ptr %0, i32 1, <4 x i1> %mask, <4 x float> zeroinitializer) + %gep.0.16 = getelementptr i8, ptr %0, i32 16 + %load.0.16 = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %gep.0.16, i32 1, <4 x i1> %mask, <4 x float> zeroinitializer) + %fmul = fmul <4 x float> %load.0.0, %load.0.16 + call void @llvm.masked.store.v4f32.p0(<4 x float> %fmul, ptr %1, i32 1, <4 x i1> %mask) + %load.1.0 = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %1, i32 1, <4 x i1> %mask, <4 x float> %passthrough) + ret <4 x float> %load.1.0 +} diff --git a/llvm/test/Transforms/GVN/masked-load-store.ll b/llvm/test/Transforms/GVN/masked-load-store.ll index 984a756..b112e99 100644 --- a/llvm/test/Transforms/GVN/masked-load-store.ll +++ b/llvm/test/Transforms/GVN/masked-load-store.ll @@ -36,6 +36,180 @@ define <128 x i8> @f1(ptr %a0, <128 x i8> %a1, <128 x i8> %a2) { ret <128 x i8> %v4 } -declare <128 x i8> @llvm.masked.load.v128i8.p0(ptr, i32, <128 x i1>, <128 x i8>) -declare void @llvm.masked.store.v128i8.p0(<128 x i8>, ptr, i32, <128 x i1>) +define <4 x float> @forward_masked_load(ptr %0, ptr %1) { +; CHECK-LABEL: @forward_masked_load( +; CHECK-NEXT: [[TMP4:%.*]] = call <4 x float> @llvm.masked.load.v4f32.p0(ptr [[TMP0:%.*]], i32 1, <4 x i1> splat (i1 true), <4 x float> zeroinitializer) +; CHECK-NEXT: call void @llvm.masked.store.v4f32.p0(<4 x float> [[TMP4]], ptr [[TMP1:%.*]], i32 1, <4 x i1> splat (i1 true)) +; CHECK-NEXT: ret <4 x float> [[TMP4]] +; + %mask = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 0, i32 4) + %load1 = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %0, i32 1, <4 x i1> %mask, <4 x float> zeroinitializer) + call void @llvm.masked.store.v4f32.p0(<4 x float> %load1, ptr %1, i32 1, <4 x i1> %mask) + %load2 = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %1, i32 1, <4 x i1> %mask, <4 x float> zeroinitializer) + ret <4 x float> %load2 +} + +define <4 x float> @forward_masked_load_arbitrary_mask(ptr %loc_a, ptr %loc_b, <4 x i1> %mask) { +; CHECK-LABEL: @forward_masked_load_arbitrary_mask( +; CHECK-NEXT: [[LOAD1:%.*]] = call <4 x float> @llvm.masked.load.v4f32.p0(ptr [[LOC_A:%.*]], i32 1, <4 x i1> [[MASK:%.*]], <4 x float> zeroinitializer) +; CHECK-NEXT: call void @llvm.masked.store.v4f32.p0(<4 x float> [[LOAD1]], ptr [[LOC_B:%.*]], i32 1, <4 x i1> [[MASK]]) +; CHECK-NEXT: [[TMP1:%.*]] = select <4 x i1> [[MASK]], <4 x float> [[LOAD1]], <4 x float> zeroinitializer +; CHECK-NEXT: ret <4 x float> [[TMP1]] +; + %load1 = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %loc_a, i32 1, <4 x i1> %mask, <4 x float> zeroinitializer) + call void @llvm.masked.store.v4f32.p0(<4 x float> %load1, ptr %loc_b, i32 1, <4 x i1> %mask) + %load2 = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %loc_b, i32 1, <4 x i1> %mask, <4 x float> zeroinitializer) + ret <4 x float> %load2 +} + +define <4 x float> @forward_binop_splat_i1_mask(ptr %0, ptr %1) { +; CHECK-LABEL: @forward_binop_splat_i1_mask( +; CHECK-NEXT: [[LOAD_0_0:%.*]] = call <4 x float> @llvm.masked.load.v4f32.p0(ptr [[TMP0:%.*]], i32 1, <4 x i1> splat (i1 true), <4 x float> zeroinitializer) +; CHECK-NEXT: [[GEP_0_16:%.*]] = getelementptr i8, ptr [[TMP0]], i32 16 +; CHECK-NEXT: [[LOAD_0_16:%.*]] = call <4 x float> @llvm.masked.load.v4f32.p0(ptr [[GEP_0_16]], i32 1, <4 x i1> splat (i1 true), <4 x float> zeroinitializer) +; CHECK-NEXT: [[FMUL:%.*]] = fmul <4 x float> [[LOAD_0_0]], [[LOAD_0_16]] +; CHECK-NEXT: call void @llvm.masked.store.v4f32.p0(<4 x float> [[FMUL]], ptr [[TMP1:%.*]], i32 1, <4 x i1> splat (i1 true)) +; CHECK-NEXT: 
ret <4 x float> [[FMUL]] +; + %mask = tail call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 0, i32 4) + %load.0.0 = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %0, i32 1, <4 x i1> %mask, <4 x float> zeroinitializer) + %gep.0.16 = getelementptr i8, ptr %0, i32 16 + %load.0.16 = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %gep.0.16, i32 1, <4 x i1> %mask, <4 x float> zeroinitializer) + %fmul = fmul <4 x float> %load.0.0, %load.0.16 + call void @llvm.masked.store.v4f32.p0(<4 x float> %fmul, ptr %1, i32 1, <4 x i1> %mask) + %load.1.0 = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %1, i32 1, <4 x i1> %mask, <4 x float> zeroinitializer) + ret <4 x float> %load.1.0 +} + +define <4 x float> @forward_binop_with_sel(ptr %0, ptr %1, i32 %a, i32 %b, <4 x float> %passthrough) { +; CHECK-LABEL: @forward_binop_with_sel( +; CHECK-NEXT: [[MASK:%.*]] = tail call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 [[A:%.*]], i32 [[B:%.*]]) +; CHECK-NEXT: [[LOAD_0_0:%.*]] = call <4 x float> @llvm.masked.load.v4f32.p0(ptr [[TMP0:%.*]], i32 1, <4 x i1> [[MASK]], <4 x float> zeroinitializer) +; CHECK-NEXT: [[GEP_0_16:%.*]] = getelementptr i8, ptr [[TMP0]], i32 16 +; CHECK-NEXT: [[LOAD_0_16:%.*]] = call <4 x float> @llvm.masked.load.v4f32.p0(ptr [[GEP_0_16]], i32 1, <4 x i1> [[MASK]], <4 x float> zeroinitializer) +; CHECK-NEXT: [[FMUL:%.*]] = fmul <4 x float> [[LOAD_0_0]], [[LOAD_0_16]] +; CHECK-NEXT: call void @llvm.masked.store.v4f32.p0(<4 x float> [[FMUL]], ptr [[TMP1:%.*]], i32 1, <4 x i1> [[MASK]]) +; CHECK-NEXT: [[TMP3:%.*]] = select <4 x i1> [[MASK]], <4 x float> [[FMUL]], <4 x float> [[PASSTHROUGH:%.*]] +; CHECK-NEXT: ret <4 x float> [[TMP3]] +; + %mask = tail call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %a, i32 %b) + %load.0.0 = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %0, i32 1, <4 x i1> %mask, <4 x float> zeroinitializer) + %gep.0.16 = getelementptr i8, ptr %0, i32 16 + %load.0.16 = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %gep.0.16, i32 1, <4 x i1> %mask, <4 x float> zeroinitializer) + %fmul = fmul <4 x float> %load.0.0, %load.0.16 + call void @llvm.masked.store.v4f32.p0(<4 x float> %fmul, ptr %1, i32 1, <4 x i1> %mask) + %load.1.0 = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %1, i32 1, <4 x i1> %mask, <4 x float> %passthrough) + ret <4 x float> %load.1.0 +} + +define <vscale x 4 x float> @forward_masked_load_scalable(ptr %0, ptr %1, <vscale x 4 x float> %passthrough) { +; CHECK-LABEL: @forward_masked_load_scalable( +; CHECK-NEXT: [[TMP3:%.*]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i32(i32 0, i32 4) +; CHECK-NEXT: [[TMP4:%.*]] = call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr [[TMP0:%.*]], i32 1, <vscale x 4 x i1> [[TMP3]], <vscale x 4 x float> [[PASSTHROUGH:%.*]]) +; CHECK-NEXT: call void @llvm.masked.store.nxv4f32.p0(<vscale x 4 x float> [[TMP4]], ptr [[TMP1:%.*]], i32 1, <vscale x 4 x i1> [[TMP3]]) +; CHECK-NEXT: [[TMP5:%.*]] = select <vscale x 4 x i1> [[TMP3]], <vscale x 4 x float> [[TMP4]], <vscale x 4 x float> [[PASSTHROUGH]] +; CHECK-NEXT: ret <vscale x 4 x float> [[TMP5]] +; + %mask = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i32(i32 0, i32 4) + %load1 = call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr %0, i32 1, <vscale x 4 x i1> %mask, <vscale x 4 x float> %passthrough) + call void @llvm.masked.store.nxv4f32.p0(<vscale x 4 x float> %load1, ptr %1, i32 1, <vscale x 4 x i1> %mask) + %load2 = call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr %1, i32 1, <vscale x 4 x i1> %mask, <vscale x 4 x 
float> %passthrough) + ret <vscale x 4 x float> %load2 +} +define <vscale x 4 x float> @forward_masked_load_scalable_type_mismatch(ptr %0, ptr %1, <vscale x 4 x float> %passthrough) { +; CHECK-LABEL: @forward_masked_load_scalable_type_mismatch( +; CHECK-NEXT: [[MASK:%.*]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i32(i32 0, i32 4) +; CHECK-NEXT: [[LOAD1:%.*]] = call <vscale x 4 x double> @llvm.masked.load.nxv4f64.p0(ptr [[TMP0:%.*]], i32 1, <vscale x 4 x i1> [[MASK]], <vscale x 4 x double> zeroinitializer) +; CHECK-NEXT: call void @llvm.masked.store.nxv4f64.p0(<vscale x 4 x double> [[LOAD1]], ptr [[TMP1:%.*]], i32 1, <vscale x 4 x i1> [[MASK]]) +; CHECK-NEXT: [[LOAD2:%.*]] = call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr [[TMP1]], i32 1, <vscale x 4 x i1> [[MASK]], <vscale x 4 x float> [[PASSTHROUGH:%.*]]) +; CHECK-NEXT: ret <vscale x 4 x float> [[LOAD2]] +; + %mask = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i32(i32 0, i32 4) + %load1 = call <vscale x 4 x double> @llvm.masked.load.nxv4f64.p0(ptr %0, i32 1, <vscale x 4 x i1> %mask, <vscale x 4 x double> zeroinitializer) + call void @llvm.masked.store.nxv4f64.p0(<vscale x 4 x double> %load1, ptr %1, i32 1, <vscale x 4 x i1> %mask) + %load2 = call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr %1, i32 1, <vscale x 4 x i1> %mask, <vscale x 4 x float> %passthrough) + ret <vscale x 4 x float> %load2 +} + +define <vscale x 4 x float> @generate_sel_with_passthrough(ptr %0, ptr %1, <vscale x 4 x float> %passthrough) { +; CHECK-LABEL: @generate_sel_with_passthrough( +; CHECK-NEXT: [[TMP3:%.*]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i32(i32 0, i32 4) +; CHECK-NEXT: [[TMP4:%.*]] = call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr [[TMP0:%.*]], i32 1, <vscale x 4 x i1> [[TMP3]], <vscale x 4 x float> zeroinitializer) +; CHECK-NEXT: call void @llvm.masked.store.nxv4f32.p0(<vscale x 4 x float> [[TMP4]], ptr [[TMP1:%.*]], i32 1, <vscale x 4 x i1> [[TMP3]]) +; CHECK-NEXT: [[TMP5:%.*]] = select <vscale x 4 x i1> [[TMP3]], <vscale x 4 x float> [[TMP4]], <vscale x 4 x float> [[PASSTHROUGH:%.*]] +; CHECK-NEXT: ret <vscale x 4 x float> [[TMP5]] +; + %mask = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i32(i32 0, i32 4) + %load1 = call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr %0, i32 1, <vscale x 4 x i1> %mask, <vscale x 4 x float> zeroinitializer) + call void @llvm.masked.store.nxv4f32.p0(<vscale x 4 x float> %load1, ptr %1, i32 1, <vscale x 4 x i1> %mask) + %load2 = call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr %1, i32 1, <vscale x 4 x i1> %mask, <vscale x 4 x float> %passthrough) + ret <vscale x 4 x float> %load2 +} + +define <vscale x 4 x float> @forward_binop_with_sel_scalable(ptr %0, ptr %1, <vscale x 4 x float> %passthrough) { +; CHECK-LABEL: @forward_binop_with_sel_scalable( +; CHECK-NEXT: [[MASK:%.*]] = tail call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i32(i32 0, i32 4) +; CHECK-NEXT: [[LOAD_0_0:%.*]] = call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr [[TMP0:%.*]], i32 1, <vscale x 4 x i1> [[MASK]], <vscale x 4 x float> zeroinitializer) +; CHECK-NEXT: [[GEP_0_16:%.*]] = getelementptr i8, ptr [[TMP0]], i32 16 +; CHECK-NEXT: [[LOAD_0_16:%.*]] = call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr [[GEP_0_16]], i32 1, <vscale x 4 x i1> [[MASK]], <vscale x 4 x float> zeroinitializer) +; CHECK-NEXT: [[FMUL:%.*]] = fmul <vscale x 4 x float> [[LOAD_0_0]], [[LOAD_0_16]] +; CHECK-NEXT: call void 
@llvm.masked.store.nxv4f32.p0(<vscale x 4 x float> [[FMUL]], ptr [[TMP1:%.*]], i32 1, <vscale x 4 x i1> [[MASK]]) +; CHECK-NEXT: [[TMP3:%.*]] = select <vscale x 4 x i1> [[MASK]], <vscale x 4 x float> [[FMUL]], <vscale x 4 x float> [[PASSTHROUGH:%.*]] +; CHECK-NEXT: ret <vscale x 4 x float> [[TMP3]] +; + %mask = tail call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i32(i32 0, i32 4) + %load.0.0 = call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr %0, i32 1, <vscale x 4 x i1> %mask, <vscale x 4 x float> zeroinitializer) + %gep.0.16 = getelementptr i8, ptr %0, i32 16 + %load.0.16 = call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr %gep.0.16, i32 1, <vscale x 4 x i1> %mask, <vscale x 4 x float> zeroinitializer) + %fmul = fmul <vscale x 4 x float> %load.0.0, %load.0.16 + call void @llvm.masked.store.nxv4f32.p0(<vscale x 4 x float> %fmul, ptr %1, i32 1, <vscale x 4 x i1> %mask) + %load.1.0 = call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr %1, i32 1, <vscale x 4 x i1> %mask, <vscale x 4 x float> %passthrough) + ret <vscale x 4 x float> %load.1.0 +} + +define <vscale x 4 x float> @load_mask_differs(ptr %0, ptr %1, <vscale x 4 x float> %passthrough) { +; CHECK-LABEL: @load_mask_differs( +; CHECK-NEXT: [[MASK0:%.*]] = tail call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i32(i32 0, i32 8) +; CHECK-NEXT: [[MASK1:%.*]] = tail call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i32(i32 0, i32 4) +; CHECK-NEXT: [[LOAD_0_0:%.*]] = call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr [[TMP0:%.*]], i32 1, <vscale x 4 x i1> [[MASK0]], <vscale x 4 x float> zeroinitializer) +; CHECK-NEXT: [[GEP_0_16:%.*]] = getelementptr i8, ptr [[TMP0]], i32 16 +; CHECK-NEXT: [[LOAD_0_16:%.*]] = call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr [[GEP_0_16]], i32 1, <vscale x 4 x i1> [[MASK0]], <vscale x 4 x float> zeroinitializer) +; CHECK-NEXT: [[FMUL:%.*]] = fmul <vscale x 4 x float> [[LOAD_0_0]], [[LOAD_0_16]] +; CHECK-NEXT: call void @llvm.masked.store.nxv4f32.p0(<vscale x 4 x float> [[FMUL]], ptr [[TMP1:%.*]], i32 1, <vscale x 4 x i1> [[MASK0]]) +; CHECK-NEXT: [[LOAD_1_0:%.*]] = call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr [[TMP1]], i32 1, <vscale x 4 x i1> [[MASK1]], <vscale x 4 x float> [[PASSTHROUGH:%.*]]) +; CHECK-NEXT: ret <vscale x 4 x float> [[LOAD_1_0]] +; + %mask0 = tail call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i32(i32 0, i32 8) + %mask1 = tail call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i32(i32 0, i32 4) + %load.0.0 = call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr %0, i32 1, <vscale x 4 x i1> %mask0, <vscale x 4 x float> zeroinitializer) + %gep.0.16 = getelementptr i8, ptr %0, i32 16 + %load.0.16 = call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr %gep.0.16, i32 1, <vscale x 4 x i1> %mask0, <vscale x 4 x float> zeroinitializer) + %fmul = fmul <vscale x 4 x float> %load.0.0, %load.0.16 + call void @llvm.masked.store.nxv4f32.p0(<vscale x 4 x float> %fmul, ptr %1, i32 1, <vscale x 4 x i1> %mask0) + %load.1.0 = call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr %1, i32 1, <vscale x 4 x i1> %mask1, <vscale x 4 x float> %passthrough) + ret <vscale x 4 x float> %load.1.0 +} + +define <vscale x 4 x float> @store_mask_differs(ptr %0, ptr %1, <vscale x 4 x float> %passthrough) { +; CHECK-LABEL: @store_mask_differs( +; CHECK-NEXT: [[MASK0:%.*]] = tail call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i32(i32 0, i32 8) +; CHECK-NEXT: [[MASK1:%.*]] = tail call <vscale x 4 x i1> 
@llvm.get.active.lane.mask.nxv4i1.i32(i32 0, i32 4) +; CHECK-NEXT: [[LOAD_0_0:%.*]] = call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr [[TMP0:%.*]], i32 1, <vscale x 4 x i1> [[MASK0]], <vscale x 4 x float> zeroinitializer) +; CHECK-NEXT: [[GEP_0_16:%.*]] = getelementptr i8, ptr [[TMP0]], i32 16 +; CHECK-NEXT: [[LOAD_0_16:%.*]] = call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr [[GEP_0_16]], i32 1, <vscale x 4 x i1> [[MASK0]], <vscale x 4 x float> zeroinitializer) +; CHECK-NEXT: [[FMUL:%.*]] = fmul <vscale x 4 x float> [[LOAD_0_0]], [[LOAD_0_16]] +; CHECK-NEXT: call void @llvm.masked.store.nxv4f32.p0(<vscale x 4 x float> [[FMUL]], ptr [[TMP1:%.*]], i32 1, <vscale x 4 x i1> [[MASK1]]) +; CHECK-NEXT: [[LOAD_1_0:%.*]] = call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr [[TMP1]], i32 1, <vscale x 4 x i1> [[MASK0]], <vscale x 4 x float> [[PASSTHROUGH:%.*]]) +; CHECK-NEXT: ret <vscale x 4 x float> [[LOAD_1_0]] +; + %mask0 = tail call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i32(i32 0, i32 8) + %mask1 = tail call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i32(i32 0, i32 4) + %load.0.0 = call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr %0, i32 1, <vscale x 4 x i1> %mask0, <vscale x 4 x float> zeroinitializer) + %gep.0.16 = getelementptr i8, ptr %0, i32 16 + %load.0.16 = call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr %gep.0.16, i32 1, <vscale x 4 x i1> %mask0, <vscale x 4 x float> zeroinitializer) + %fmul = fmul <vscale x 4 x float> %load.0.0, %load.0.16 + call void @llvm.masked.store.nxv4f32.p0(<vscale x 4 x float> %fmul, ptr %1, i32 1, <vscale x 4 x i1> %mask1) + %load.1.0 = call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr %1, i32 1, <vscale x 4 x i1> %mask0, <vscale x 4 x float> %passthrough) + ret <vscale x 4 x float> %load.1.0 +} diff --git a/llvm/test/Transforms/InstCombine/masked_intrinsics.ll b/llvm/test/Transforms/InstCombine/masked_intrinsics.ll index 8f76834..67ab167 100644 --- a/llvm/test/Transforms/InstCombine/masked_intrinsics.ll +++ b/llvm/test/Transforms/InstCombine/masked_intrinsics.ll @@ -16,6 +16,14 @@ define <2 x double> @load_zeromask(ptr %ptr, <2 x double> %passthru) { ret <2 x double> %res } +define <2 x double> @load_zero_withpoison_mask(ptr %ptr, <2 x double> %passthru) { +; CHECK-LABEL: @load_zero_withpoison_mask( +; CHECK-NEXT: ret <2 x double> [[PASSTHRU:%.*]] +; + %res = call <2 x double> @llvm.masked.load.v2f64.p0(ptr %ptr, i32 1, <2 x i1> <i1 0, i1 poison>, <2 x double> %passthru) + ret <2 x double> %res +} + define <2 x double> @load_onemask(ptr %ptr, <2 x double> %passthru) { ; CHECK-LABEL: @load_onemask( ; CHECK-NEXT: [[UNMASKEDLOAD:%.*]] = load <2 x double>, ptr [[PTR:%.*]], align 2 @@ -150,6 +158,14 @@ define void @store_zeromask(ptr %ptr, <2 x double> %val) { ret void } +define void @store_poisonmask(ptr %ptr, <2 x double> %val) { +; CHECK-LABEL: @store_poisonmask( +; CHECK-NEXT: ret void +; + call void @llvm.masked.store.v2f64.p0(<2 x double> %val, ptr %ptr, i32 4, <2 x i1> splat(i1 poison)) + ret void +} + define void @store_onemask(ptr %ptr, <2 x double> %val) { ; CHECK-LABEL: @store_onemask( ; CHECK-NEXT: store <2 x double> [[VAL:%.*]], ptr [[PTR:%.*]], align 4 @@ -159,6 +175,15 @@ define void @store_onemask(ptr %ptr, <2 x double> %val) { ret void } +define void @store_one_withpoison_mask(ptr %ptr, <2 x double> %val) { +; CHECK-LABEL: @store_one_withpoison_mask( +; CHECK-NEXT: store <2 x double> [[VAL:%.*]], ptr [[PTR:%.*]], align 4 +; CHECK-NEXT: ret void +; + call void 
@llvm.masked.store.v2f64.p0(<2 x double> %val, ptr %ptr, i32 4, <2 x i1> <i1 1, i1 poison>) + ret void +} + define void @store_demandedelts(ptr %ptr, double %val) { ; CHECK-LABEL: @store_demandedelts( ; CHECK-NEXT: [[VALVEC1:%.*]] = insertelement <2 x double> poison, double [[VAL:%.*]], i64 0 @@ -189,6 +214,13 @@ define <2 x double> @gather_zeromask(<2 x ptr> %ptrs, <2 x double> %passthru) { ret <2 x double> %res } +define <2 x double> @gather_zero_withpoison_mask(<2 x ptr> %ptrs, <2 x double> %passthru) { +; CHECK-LABEL: @gather_zero_withpoison_mask( +; CHECK-NEXT: ret <2 x double> [[PASSTHRU:%.*]] +; + %res = call <2 x double> @llvm.masked.gather.v2f64.v2p0(<2 x ptr> %ptrs, i32 4, <2 x i1> <i1 0, i1 poison>, <2 x double> %passthru) + ret <2 x double> %res +} define <2 x double> @gather_onemask(<2 x ptr> %ptrs, <2 x double> %passthru) { ; CHECK-LABEL: @gather_onemask( @@ -199,6 +231,15 @@ define <2 x double> @gather_onemask(<2 x ptr> %ptrs, <2 x double> %passthru) { ret <2 x double> %res } +define <2 x double> @gather_one_withpoisonmask(<2 x ptr> %ptrs, <2 x double> %passthru) { +; CHECK-LABEL: @gather_one_withpoisonmask( +; CHECK-NEXT: [[RES:%.*]] = call <2 x double> @llvm.masked.gather.v2f64.v2p0(<2 x ptr> [[PTRS:%.*]], i32 4, <2 x i1> <i1 true, i1 poison>, <2 x double> [[PASSTHRU:%.*]]) +; CHECK-NEXT: ret <2 x double> [[RES]] +; + %res = call <2 x double> @llvm.masked.gather.v2f64.v2p0(<2 x ptr> %ptrs, i32 4, <2 x i1> <i1 true, i1 poison>, <2 x double> %passthru) + ret <2 x double> %res +} + define <4 x double> @gather_lane2(ptr %base, double %pt) { ; CHECK-LABEL: @gather_lane2( ; CHECK-NEXT: [[PTRS:%.*]] = getelementptr double, ptr [[BASE:%.*]], <4 x i64> <i64 poison, i64 poison, i64 2, i64 poison> @@ -257,6 +298,23 @@ define void @scatter_zeromask(<2 x ptr> %ptrs, <2 x double> %val) { ret void } +define void @scatter_zero_withpoison_mask(<2 x ptr> %ptrs, <2 x double> %val) { +; CHECK-LABEL: @scatter_zero_withpoison_mask( +; CHECK-NEXT: ret void +; + call void @llvm.masked.scatter.v2f64.v2p0(<2 x double> %val, <2 x ptr> %ptrs, i32 8, <2 x i1> <i1 0, i1 poison>) + ret void +} + +define void @scatter_one_withpoison_mask(<2 x ptr> %ptrs, <2 x double> %val) { +; CHECK-LABEL: @scatter_one_withpoison_mask( +; CHECK-NEXT: call void @llvm.masked.scatter.v2f64.v2p0(<2 x double> [[VAL:%.*]], <2 x ptr> [[PTRS:%.*]], i32 8, <2 x i1> <i1 true, i1 poison>) +; CHECK-NEXT: ret void +; + call void @llvm.masked.scatter.v2f64.v2p0(<2 x double> %val, <2 x ptr> %ptrs, i32 8, <2 x i1> <i1 1, i1 poison>) + ret void +} + define void @scatter_demandedelts(ptr %ptr, double %val) { ; CHECK-LABEL: @scatter_demandedelts( ; CHECK-NEXT: [[PTRS:%.*]] = getelementptr double, ptr [[PTR:%.*]], <2 x i64> <i64 0, i64 poison> diff --git a/llvm/test/Transforms/InstCombine/pr83947.ll b/llvm/test/Transforms/InstCombine/pr83947.ll index 1906502..679230a4 100644 --- a/llvm/test/Transforms/InstCombine/pr83947.ll +++ b/llvm/test/Transforms/InstCombine/pr83947.ll @@ -24,7 +24,6 @@ define void @masked_scatter2() { define void @masked_scatter3() { ; CHECK-LABEL: define void @masked_scatter3() { -; CHECK-NEXT: store i32 0, ptr @c, align 4 ; CHECK-NEXT: ret void ; call void @llvm.masked.scatter.v2i32.v2p0(<2 x i32> zeroinitializer, <2 x ptr> splat (ptr @c), i32 4, <2 x i1> undef) @@ -50,7 +49,6 @@ define void @masked_scatter5() { define void @masked_scatter6() { ; CHECK-LABEL: define void @masked_scatter6() { -; CHECK-NEXT: store i32 0, ptr @c, align 4 ; CHECK-NEXT: ret void ; call void @llvm.masked.scatter.v2i32.v2p0(<2 x i32> 
zeroinitializer, <2 x ptr> splat (ptr @c), i32 4, <2 x i1> <i1 undef, i1 false>) diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/fully-unrolled-cost.ll b/llvm/test/Transforms/LoopVectorize/AArch64/fully-unrolled-cost.ll index c3b0bc8..27ca414 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/fully-unrolled-cost.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/fully-unrolled-cost.ll @@ -86,7 +86,7 @@ define i64 @test_two_ivs(ptr %a, ptr %b, i64 %start) #0 { ; CHECK-NEXT: Cost of 0 for VF 16: induction instruction %i.iv = phi i64 [ 0, %entry ], [ %i.iv.next, %for.body ] ; CHECK-NEXT: Cost of 0 for VF 16: induction instruction %j.iv = phi i64 [ %start, %entry ], [ %j.iv.next, %for.body ] ; CHECK-NEXT: Cost of 0 for VF 16: EMIT vp<{{.+}}> = CANONICAL-INDUCTION ir<0>, vp<%index.next> -; CHECK: Cost for VF 16: 48 +; CHECK: Cost for VF 16: 41 ; CHECK: LV: Selecting VF: 16 entry: br label %for.body diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-chained.ll b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-chained.ll index 229209e..5ae0839 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-chained.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-chained.ll @@ -204,37 +204,33 @@ define i32 @chained_partial_reduce_add_add(ptr %a, ptr %b, ptr %c, i32 %N) #0 { ; CHECK-SVE-NEXT: [[CMP28_NOT:%.*]] = icmp ult i32 [[N]], 2 ; CHECK-SVE-NEXT: [[DIV27:%.*]] = lshr i32 [[N]], 1 ; CHECK-SVE-NEXT: [[WIDE_TRIP_COUNT:%.*]] = zext nneg i32 [[DIV27]] to i64 -; CHECK-SVE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-SVE-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 2 -; CHECK-SVE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[WIDE_TRIP_COUNT]], [[TMP1]] +; CHECK-SVE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[WIDE_TRIP_COUNT]], 16 ; CHECK-SVE-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK-SVE: vector.ph: -; CHECK-SVE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-SVE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4 -; CHECK-SVE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], [[TMP3]] +; CHECK-SVE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], 16 ; CHECK-SVE-NEXT: [[N_VEC:%.*]] = sub i64 [[WIDE_TRIP_COUNT]], [[N_MOD_VF]] ; CHECK-SVE-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-SVE: vector.body: ; CHECK-SVE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-SVE-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP19:%.*]], [[VECTOR_BODY]] ] +; CHECK-SVE-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE3:%.*]], [[VECTOR_BODY]] ] ; CHECK-SVE-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw i8, ptr [[A]], i64 [[INDEX]] ; CHECK-SVE-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw i8, ptr [[B]], i64 [[INDEX]] ; CHECK-SVE-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw i8, ptr [[C]], i64 [[INDEX]] -; CHECK-SVE-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i8>, ptr [[TMP7]], align 1 -; CHECK-SVE-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 4 x i8>, ptr [[TMP8]], align 1 -; CHECK-SVE-NEXT: [[WIDE_LOAD2:%.*]] = load <vscale x 4 x i8>, ptr [[TMP9]], align 1 -; CHECK-SVE-NEXT: [[TMP13:%.*]] = sext <vscale x 4 x i8> [[WIDE_LOAD]] to <vscale x 4 x i32> -; CHECK-SVE-NEXT: [[TMP14:%.*]] = sext <vscale x 4 x i8> [[WIDE_LOAD1]] to <vscale x 4 x i32> -; CHECK-SVE-NEXT: [[TMP15:%.*]] = sext <vscale x 4 x i8> [[WIDE_LOAD2]] to <vscale x 4 x i32> -; 
CHECK-SVE-NEXT: [[TMP16:%.*]] = mul nsw <vscale x 4 x i32> [[TMP13]], [[TMP14]] -; CHECK-SVE-NEXT: [[TMP17:%.*]] = add <vscale x 4 x i32> [[VEC_PHI]], [[TMP16]] -; CHECK-SVE-NEXT: [[TMP18:%.*]] = mul nsw <vscale x 4 x i32> [[TMP13]], [[TMP15]] -; CHECK-SVE-NEXT: [[TMP19]] = add <vscale x 4 x i32> [[TMP17]], [[TMP18]] -; CHECK-SVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] +; CHECK-SVE-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP7]], align 1 +; CHECK-SVE-NEXT: [[WIDE_LOAD1:%.*]] = load <16 x i8>, ptr [[TMP8]], align 1 +; CHECK-SVE-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP9]], align 1 +; CHECK-SVE-NEXT: [[TMP3:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32> +; CHECK-SVE-NEXT: [[TMP4:%.*]] = sext <16 x i8> [[WIDE_LOAD1]] to <16 x i32> +; CHECK-SVE-NEXT: [[TMP5:%.*]] = sext <16 x i8> [[WIDE_LOAD2]] to <16 x i32> +; CHECK-SVE-NEXT: [[TMP6:%.*]] = mul nsw <16 x i32> [[TMP3]], [[TMP4]] +; CHECK-SVE-NEXT: [[PARTIAL_REDUCE:%.*]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP6]]) +; CHECK-SVE-NEXT: [[TMP10:%.*]] = mul nsw <16 x i32> [[TMP3]], [[TMP5]] +; CHECK-SVE-NEXT: [[PARTIAL_REDUCE3]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[PARTIAL_REDUCE]], <16 x i32> [[TMP10]]) +; CHECK-SVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 ; CHECK-SVE-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-SVE-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; CHECK-SVE: middle.block: -; CHECK-SVE-NEXT: [[TMP21:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP19]]) +; CHECK-SVE-NEXT: [[TMP11:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE3]]) ; CHECK-SVE-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[WIDE_TRIP_COUNT]], [[N_VEC]] ; CHECK-SVE-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]] ; CHECK-SVE: scalar.ph: @@ -670,39 +666,35 @@ define i32 @chained_partial_reduce_add_add_add(ptr %a, ptr %b, ptr %c, i32 %N) # ; CHECK-SVE-NEXT: [[CMP28_NOT:%.*]] = icmp ult i32 [[N]], 2 ; CHECK-SVE-NEXT: [[DIV27:%.*]] = lshr i32 [[N]], 1 ; CHECK-SVE-NEXT: [[WIDE_TRIP_COUNT:%.*]] = zext nneg i32 [[DIV27]] to i64 -; CHECK-SVE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-SVE-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 2 -; CHECK-SVE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[WIDE_TRIP_COUNT]], [[TMP1]] +; CHECK-SVE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[WIDE_TRIP_COUNT]], 16 ; CHECK-SVE-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK-SVE: vector.ph: -; CHECK-SVE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-SVE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4 -; CHECK-SVE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], [[TMP3]] +; CHECK-SVE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], 16 ; CHECK-SVE-NEXT: [[N_VEC:%.*]] = sub i64 [[WIDE_TRIP_COUNT]], [[N_MOD_VF]] ; CHECK-SVE-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-SVE: vector.body: ; CHECK-SVE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-SVE-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP21:%.*]], [[VECTOR_BODY]] ] +; CHECK-SVE-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE4:%.*]], [[VECTOR_BODY]] ] ; CHECK-SVE-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw i8, ptr [[A]], i64 [[INDEX]] ; 
CHECK-SVE-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw i8, ptr [[B]], i64 [[INDEX]] ; CHECK-SVE-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw i8, ptr [[C]], i64 [[INDEX]] -; CHECK-SVE-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i8>, ptr [[TMP7]], align 1 -; CHECK-SVE-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 4 x i8>, ptr [[TMP8]], align 1 -; CHECK-SVE-NEXT: [[WIDE_LOAD2:%.*]] = load <vscale x 4 x i8>, ptr [[TMP9]], align 1 -; CHECK-SVE-NEXT: [[TMP13:%.*]] = sext <vscale x 4 x i8> [[WIDE_LOAD]] to <vscale x 4 x i32> -; CHECK-SVE-NEXT: [[TMP14:%.*]] = sext <vscale x 4 x i8> [[WIDE_LOAD1]] to <vscale x 4 x i32> -; CHECK-SVE-NEXT: [[TMP15:%.*]] = sext <vscale x 4 x i8> [[WIDE_LOAD2]] to <vscale x 4 x i32> -; CHECK-SVE-NEXT: [[TMP16:%.*]] = mul nsw <vscale x 4 x i32> [[TMP13]], [[TMP14]] -; CHECK-SVE-NEXT: [[TMP17:%.*]] = add <vscale x 4 x i32> [[VEC_PHI]], [[TMP16]] -; CHECK-SVE-NEXT: [[TMP18:%.*]] = mul nsw <vscale x 4 x i32> [[TMP13]], [[TMP15]] -; CHECK-SVE-NEXT: [[TMP19:%.*]] = add <vscale x 4 x i32> [[TMP17]], [[TMP18]] -; CHECK-SVE-NEXT: [[TMP20:%.*]] = mul nsw <vscale x 4 x i32> [[TMP14]], [[TMP15]] -; CHECK-SVE-NEXT: [[TMP21]] = add <vscale x 4 x i32> [[TMP19]], [[TMP20]] -; CHECK-SVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] +; CHECK-SVE-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP7]], align 1 +; CHECK-SVE-NEXT: [[WIDE_LOAD1:%.*]] = load <16 x i8>, ptr [[TMP8]], align 1 +; CHECK-SVE-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP9]], align 1 +; CHECK-SVE-NEXT: [[TMP3:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32> +; CHECK-SVE-NEXT: [[TMP4:%.*]] = sext <16 x i8> [[WIDE_LOAD1]] to <16 x i32> +; CHECK-SVE-NEXT: [[TMP5:%.*]] = sext <16 x i8> [[WIDE_LOAD2]] to <16 x i32> +; CHECK-SVE-NEXT: [[TMP6:%.*]] = mul nsw <16 x i32> [[TMP3]], [[TMP4]] +; CHECK-SVE-NEXT: [[PARTIAL_REDUCE:%.*]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP6]]) +; CHECK-SVE-NEXT: [[TMP11:%.*]] = mul nsw <16 x i32> [[TMP3]], [[TMP5]] +; CHECK-SVE-NEXT: [[PARTIAL_REDUCE3:%.*]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[PARTIAL_REDUCE]], <16 x i32> [[TMP11]]) +; CHECK-SVE-NEXT: [[TMP12:%.*]] = mul nsw <16 x i32> [[TMP4]], [[TMP5]] +; CHECK-SVE-NEXT: [[PARTIAL_REDUCE4]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[PARTIAL_REDUCE3]], <16 x i32> [[TMP12]]) +; CHECK-SVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 ; CHECK-SVE-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-SVE-NEXT: br i1 [[TMP22]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] ; CHECK-SVE: middle.block: -; CHECK-SVE-NEXT: [[TMP23:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP21]]) +; CHECK-SVE-NEXT: [[TMP10:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE4]]) ; CHECK-SVE-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[WIDE_TRIP_COUNT]], [[N_VEC]] ; CHECK-SVE-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]] ; CHECK-SVE: scalar.ph: @@ -996,36 +988,32 @@ define i32 @chained_partial_reduce_madd_extadd(ptr %a, ptr %b, ptr %c, i32 %N) # ; CHECK-SVE-NEXT: [[CMP28_NOT:%.*]] = icmp ult i32 [[N]], 2 ; CHECK-SVE-NEXT: [[DIV27:%.*]] = lshr i32 [[N]], 1 ; CHECK-SVE-NEXT: [[WIDE_TRIP_COUNT:%.*]] = zext nneg i32 [[DIV27]] to i64 -; CHECK-SVE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-SVE-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 2 -; CHECK-SVE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp 
ult i64 [[WIDE_TRIP_COUNT]], [[TMP1]] +; CHECK-SVE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[WIDE_TRIP_COUNT]], 16 ; CHECK-SVE-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK-SVE: vector.ph: -; CHECK-SVE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-SVE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4 -; CHECK-SVE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], [[TMP3]] +; CHECK-SVE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], 16 ; CHECK-SVE-NEXT: [[N_VEC:%.*]] = sub i64 [[WIDE_TRIP_COUNT]], [[N_MOD_VF]] ; CHECK-SVE-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-SVE: vector.body: ; CHECK-SVE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-SVE-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP18:%.*]], [[VECTOR_BODY]] ] +; CHECK-SVE-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE3:%.*]], [[VECTOR_BODY]] ] ; CHECK-SVE-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw i8, ptr [[A]], i64 [[INDEX]] ; CHECK-SVE-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw i8, ptr [[B]], i64 [[INDEX]] ; CHECK-SVE-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw i8, ptr [[C]], i64 [[INDEX]] -; CHECK-SVE-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i8>, ptr [[TMP7]], align 1 -; CHECK-SVE-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 4 x i8>, ptr [[TMP8]], align 1 -; CHECK-SVE-NEXT: [[WIDE_LOAD2:%.*]] = load <vscale x 4 x i8>, ptr [[TMP9]], align 1 -; CHECK-SVE-NEXT: [[TMP13:%.*]] = sext <vscale x 4 x i8> [[WIDE_LOAD]] to <vscale x 4 x i32> -; CHECK-SVE-NEXT: [[TMP14:%.*]] = sext <vscale x 4 x i8> [[WIDE_LOAD1]] to <vscale x 4 x i32> -; CHECK-SVE-NEXT: [[TMP15:%.*]] = sext <vscale x 4 x i8> [[WIDE_LOAD2]] to <vscale x 4 x i32> -; CHECK-SVE-NEXT: [[TMP16:%.*]] = mul nsw <vscale x 4 x i32> [[TMP13]], [[TMP14]] -; CHECK-SVE-NEXT: [[TMP17:%.*]] = add <vscale x 4 x i32> [[VEC_PHI]], [[TMP16]] -; CHECK-SVE-NEXT: [[TMP18]] = add <vscale x 4 x i32> [[TMP17]], [[TMP15]] -; CHECK-SVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] +; CHECK-SVE-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP7]], align 1 +; CHECK-SVE-NEXT: [[WIDE_LOAD1:%.*]] = load <16 x i8>, ptr [[TMP8]], align 1 +; CHECK-SVE-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP9]], align 1 +; CHECK-SVE-NEXT: [[TMP3:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32> +; CHECK-SVE-NEXT: [[TMP4:%.*]] = sext <16 x i8> [[WIDE_LOAD1]] to <16 x i32> +; CHECK-SVE-NEXT: [[TMP5:%.*]] = sext <16 x i8> [[WIDE_LOAD2]] to <16 x i32> +; CHECK-SVE-NEXT: [[TMP6:%.*]] = mul nsw <16 x i32> [[TMP3]], [[TMP4]] +; CHECK-SVE-NEXT: [[PARTIAL_REDUCE:%.*]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP6]]) +; CHECK-SVE-NEXT: [[PARTIAL_REDUCE3]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[PARTIAL_REDUCE]], <16 x i32> [[TMP5]]) +; CHECK-SVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 ; CHECK-SVE-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-SVE-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] ; CHECK-SVE: middle.block: -; CHECK-SVE-NEXT: [[TMP20:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP18]]) +; CHECK-SVE-NEXT: [[TMP10:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE3]]) ; CHECK-SVE-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[WIDE_TRIP_COUNT]], [[N_VEC]] ; 
CHECK-SVE-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]] ; CHECK-SVE: scalar.ph: @@ -1140,32 +1128,28 @@ define i32 @chained_partial_reduce_extadd_extadd(ptr %a, ptr %b, i32 %N) #0 { ; CHECK-SVE-NEXT: [[CMP28_NOT:%.*]] = icmp ult i32 [[N]], 2 ; CHECK-SVE-NEXT: [[DIV27:%.*]] = lshr i32 [[N]], 1 ; CHECK-SVE-NEXT: [[WIDE_TRIP_COUNT:%.*]] = zext nneg i32 [[DIV27]] to i64 -; CHECK-SVE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-SVE-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 2 -; CHECK-SVE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[WIDE_TRIP_COUNT]], [[TMP1]] +; CHECK-SVE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[WIDE_TRIP_COUNT]], 16 ; CHECK-SVE-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK-SVE: vector.ph: -; CHECK-SVE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-SVE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4 -; CHECK-SVE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], [[TMP3]] +; CHECK-SVE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], 16 ; CHECK-SVE-NEXT: [[N_VEC:%.*]] = sub i64 [[WIDE_TRIP_COUNT]], [[N_MOD_VF]] ; CHECK-SVE-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-SVE: vector.body: ; CHECK-SVE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-SVE-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP14:%.*]], [[VECTOR_BODY]] ] +; CHECK-SVE-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE2:%.*]], [[VECTOR_BODY]] ] ; CHECK-SVE-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw i8, ptr [[A]], i64 [[INDEX]] ; CHECK-SVE-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw i8, ptr [[B]], i64 [[INDEX]] -; CHECK-SVE-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i8>, ptr [[TMP7]], align 1 -; CHECK-SVE-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 4 x i8>, ptr [[TMP8]], align 1 -; CHECK-SVE-NEXT: [[TMP11:%.*]] = sext <vscale x 4 x i8> [[WIDE_LOAD]] to <vscale x 4 x i32> -; CHECK-SVE-NEXT: [[TMP12:%.*]] = sext <vscale x 4 x i8> [[WIDE_LOAD1]] to <vscale x 4 x i32> -; CHECK-SVE-NEXT: [[TMP13:%.*]] = add <vscale x 4 x i32> [[VEC_PHI]], [[TMP11]] -; CHECK-SVE-NEXT: [[TMP14]] = add <vscale x 4 x i32> [[TMP13]], [[TMP12]] -; CHECK-SVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] +; CHECK-SVE-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP7]], align 1 +; CHECK-SVE-NEXT: [[WIDE_LOAD1:%.*]] = load <16 x i8>, ptr [[TMP8]], align 1 +; CHECK-SVE-NEXT: [[TMP2:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32> +; CHECK-SVE-NEXT: [[TMP3:%.*]] = sext <16 x i8> [[WIDE_LOAD1]] to <16 x i32> +; CHECK-SVE-NEXT: [[PARTIAL_REDUCE:%.*]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP2]]) +; CHECK-SVE-NEXT: [[PARTIAL_REDUCE2]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[PARTIAL_REDUCE]], <16 x i32> [[TMP3]]) +; CHECK-SVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 ; CHECK-SVE-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-SVE-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]] ; CHECK-SVE: middle.block: -; CHECK-SVE-NEXT: [[TMP16:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP14]]) +; CHECK-SVE-NEXT: [[TMP5:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE2]]) ; CHECK-SVE-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[WIDE_TRIP_COUNT]], [[N_VEC]] ; CHECK-SVE-NEXT: br i1 
[[CMP_N]], label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]] ; CHECK-SVE: scalar.ph: @@ -1277,36 +1261,32 @@ define i32 @chained_partial_reduce_extadd_madd(ptr %a, ptr %b, ptr %c, i32 %N) # ; CHECK-SVE-NEXT: [[CMP28_NOT:%.*]] = icmp ult i32 [[N]], 2 ; CHECK-SVE-NEXT: [[DIV27:%.*]] = lshr i32 [[N]], 1 ; CHECK-SVE-NEXT: [[WIDE_TRIP_COUNT:%.*]] = zext nneg i32 [[DIV27]] to i64 -; CHECK-SVE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-SVE-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 2 -; CHECK-SVE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[WIDE_TRIP_COUNT]], [[TMP1]] +; CHECK-SVE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[WIDE_TRIP_COUNT]], 16 ; CHECK-SVE-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK-SVE: vector.ph: -; CHECK-SVE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-SVE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4 -; CHECK-SVE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], [[TMP3]] +; CHECK-SVE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], 16 ; CHECK-SVE-NEXT: [[N_VEC:%.*]] = sub i64 [[WIDE_TRIP_COUNT]], [[N_MOD_VF]] ; CHECK-SVE-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-SVE: vector.body: ; CHECK-SVE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-SVE-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP18:%.*]], [[VECTOR_BODY]] ] +; CHECK-SVE-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE3:%.*]], [[VECTOR_BODY]] ] ; CHECK-SVE-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw i8, ptr [[A]], i64 [[INDEX]] ; CHECK-SVE-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw i8, ptr [[B]], i64 [[INDEX]] ; CHECK-SVE-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw i8, ptr [[C]], i64 [[INDEX]] -; CHECK-SVE-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i8>, ptr [[TMP7]], align 1 -; CHECK-SVE-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 4 x i8>, ptr [[TMP8]], align 1 -; CHECK-SVE-NEXT: [[WIDE_LOAD2:%.*]] = load <vscale x 4 x i8>, ptr [[TMP9]], align 1 -; CHECK-SVE-NEXT: [[TMP13:%.*]] = sext <vscale x 4 x i8> [[WIDE_LOAD]] to <vscale x 4 x i32> -; CHECK-SVE-NEXT: [[TMP14:%.*]] = sext <vscale x 4 x i8> [[WIDE_LOAD1]] to <vscale x 4 x i32> -; CHECK-SVE-NEXT: [[TMP15:%.*]] = sext <vscale x 4 x i8> [[WIDE_LOAD2]] to <vscale x 4 x i32> -; CHECK-SVE-NEXT: [[TMP16:%.*]] = add <vscale x 4 x i32> [[VEC_PHI]], [[TMP15]] -; CHECK-SVE-NEXT: [[TMP17:%.*]] = mul nsw <vscale x 4 x i32> [[TMP13]], [[TMP14]] -; CHECK-SVE-NEXT: [[TMP18]] = add <vscale x 4 x i32> [[TMP16]], [[TMP17]] -; CHECK-SVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] +; CHECK-SVE-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP7]], align 1 +; CHECK-SVE-NEXT: [[WIDE_LOAD1:%.*]] = load <16 x i8>, ptr [[TMP8]], align 1 +; CHECK-SVE-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP9]], align 1 +; CHECK-SVE-NEXT: [[TMP3:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32> +; CHECK-SVE-NEXT: [[TMP4:%.*]] = sext <16 x i8> [[WIDE_LOAD1]] to <16 x i32> +; CHECK-SVE-NEXT: [[TMP5:%.*]] = sext <16 x i8> [[WIDE_LOAD2]] to <16 x i32> +; CHECK-SVE-NEXT: [[PARTIAL_REDUCE:%.*]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP5]]) +; CHECK-SVE-NEXT: [[TMP6:%.*]] = mul nsw <16 x i32> [[TMP3]], [[TMP4]] +; CHECK-SVE-NEXT: [[PARTIAL_REDUCE3]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[PARTIAL_REDUCE]], <16 x i32> [[TMP6]]) +; CHECK-SVE-NEXT: 
[[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 ; CHECK-SVE-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-SVE-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]] ; CHECK-SVE: middle.block: -; CHECK-SVE-NEXT: [[TMP20:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP18]]) +; CHECK-SVE-NEXT: [[TMP10:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE3]]) ; CHECK-SVE-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[WIDE_TRIP_COUNT]], [[N_VEC]] ; CHECK-SVE-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]] ; CHECK-SVE: scalar.ph: diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-epilogue.ll b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-epilogue.ll index dd239c0..8ece59a 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-epilogue.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-epilogue.ll @@ -81,7 +81,7 @@ define void @dotp_small_epilogue_vf(i64 %idx.neg, i8 %a) #1 { ; CHECK-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP4]]) ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 ; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[IV_NEXT]] -; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE]]) ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP0]], [[IV_NEXT]] diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-mixed.ll b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-mixed.ll index 49e9989..09b41fb 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-mixed.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-mixed.ll @@ -12,40 +12,40 @@ define i32 @sudot(ptr %a, ptr %b) #0 { ; CHECK-NEXT: br label [[VECTOR_PH:%.*]] ; CHECK: vector.ph: ; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 16 +; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 32 ; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]] ; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]] ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 2 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] -; CHECK-NEXT: [[VEC_PHI1:%.*]] = phi <vscale x 2 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE5:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_PHI1:%.*]] = phi <vscale x 4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE5:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]] ; CHECK-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP9:%.*]] = shl nuw i64 [[TMP8]], 3 +; CHECK-NEXT: [[TMP9:%.*]] = shl nuw i64 [[TMP8]], 4 ; CHECK-NEXT: [[TMP10:%.*]] = 
getelementptr i8, ptr [[TMP6]], i64 [[TMP9]] -; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x i8>, ptr [[TMP6]], align 1 -; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <vscale x 8 x i8>, ptr [[TMP10]], align 1 -; CHECK-NEXT: [[TMP11:%.*]] = zext <vscale x 8 x i8> [[WIDE_LOAD]] to <vscale x 8 x i32> -; CHECK-NEXT: [[TMP12:%.*]] = zext <vscale x 8 x i8> [[WIDE_LOAD2]] to <vscale x 8 x i32> +; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 16 x i8>, ptr [[TMP6]], align 1 +; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <vscale x 16 x i8>, ptr [[TMP10]], align 1 +; CHECK-NEXT: [[TMP11:%.*]] = zext <vscale x 16 x i8> [[WIDE_LOAD]] to <vscale x 16 x i32> +; CHECK-NEXT: [[TMP7:%.*]] = zext <vscale x 16 x i8> [[WIDE_LOAD2]] to <vscale x 16 x i32> ; CHECK-NEXT: [[TMP13:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX]] ; CHECK-NEXT: [[TMP15:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP16:%.*]] = shl nuw i64 [[TMP15]], 3 +; CHECK-NEXT: [[TMP16:%.*]] = shl nuw i64 [[TMP15]], 4 ; CHECK-NEXT: [[TMP17:%.*]] = getelementptr i8, ptr [[TMP13]], i64 [[TMP16]] -; CHECK-NEXT: [[WIDE_LOAD3:%.*]] = load <vscale x 8 x i8>, ptr [[TMP13]], align 1 -; CHECK-NEXT: [[WIDE_LOAD4:%.*]] = load <vscale x 8 x i8>, ptr [[TMP17]], align 1 -; CHECK-NEXT: [[TMP18:%.*]] = sext <vscale x 8 x i8> [[WIDE_LOAD3]] to <vscale x 8 x i32> -; CHECK-NEXT: [[TMP19:%.*]] = sext <vscale x 8 x i8> [[WIDE_LOAD4]] to <vscale x 8 x i32> -; CHECK-NEXT: [[TMP20:%.*]] = mul <vscale x 8 x i32> [[TMP18]], [[TMP11]] -; CHECK-NEXT: [[TMP21:%.*]] = mul <vscale x 8 x i32> [[TMP19]], [[TMP12]] -; CHECK-NEXT: [[PARTIAL_REDUCE]] = call <vscale x 2 x i32> @llvm.vector.partial.reduce.add.nxv2i32.nxv8i32(<vscale x 2 x i32> [[VEC_PHI]], <vscale x 8 x i32> [[TMP20]]) -; CHECK-NEXT: [[PARTIAL_REDUCE5]] = call <vscale x 2 x i32> @llvm.vector.partial.reduce.add.nxv2i32.nxv8i32(<vscale x 2 x i32> [[VEC_PHI1]], <vscale x 8 x i32> [[TMP21]]) +; CHECK-NEXT: [[WIDE_LOAD3:%.*]] = load <vscale x 16 x i8>, ptr [[TMP13]], align 1 +; CHECK-NEXT: [[WIDE_LOAD4:%.*]] = load <vscale x 16 x i8>, ptr [[TMP17]], align 1 +; CHECK-NEXT: [[TMP12:%.*]] = sext <vscale x 16 x i8> [[WIDE_LOAD3]] to <vscale x 16 x i32> +; CHECK-NEXT: [[TMP18:%.*]] = sext <vscale x 16 x i8> [[WIDE_LOAD4]] to <vscale x 16 x i32> +; CHECK-NEXT: [[TMP14:%.*]] = mul <vscale x 16 x i32> [[TMP12]], [[TMP11]] +; CHECK-NEXT: [[TMP19:%.*]] = mul <vscale x 16 x i32> [[TMP18]], [[TMP7]] +; CHECK-NEXT: [[PARTIAL_REDUCE]] = call <vscale x 4 x i32> @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32(<vscale x 4 x i32> [[VEC_PHI]], <vscale x 16 x i32> [[TMP14]]) +; CHECK-NEXT: [[PARTIAL_REDUCE5]] = call <vscale x 4 x i32> @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32(<vscale x 4 x i32> [[VEC_PHI1]], <vscale x 16 x i32> [[TMP19]]) ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; CHECK-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-NEXT: br i1 [[TMP22]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: [[BIN_RDX:%.*]] = add <vscale x 2 x i32> [[PARTIAL_REDUCE5]], [[PARTIAL_REDUCE]] -; CHECK-NEXT: [[TMP23:%.*]] = call i32 @llvm.vector.reduce.add.nxv2i32(<vscale x 2 x i32> [[BIN_RDX]]) +; CHECK-NEXT: [[BIN_RDX:%.*]] = add <vscale x 4 x i32> [[PARTIAL_REDUCE5]], [[PARTIAL_REDUCE]] +; CHECK-NEXT: [[TMP20:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[BIN_RDX]]) ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_EXIT:%.*]], label [[SCALAR_PH:%.*]] ; 
CHECK: scalar.ph: @@ -62,8 +62,8 @@ define i32 @sudot(ptr %a, ptr %b) #0 { ; CHECK-NOI8MM-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-NOI8MM: vector.body: ; CHECK-NOI8MM-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-NOI8MM-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 2 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] -; CHECK-NOI8MM-NEXT: [[VEC_PHI1:%.*]] = phi <vscale x 2 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE5:%.*]], [[VECTOR_BODY]] ] +; CHECK-NOI8MM-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 8 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP22:%.*]], [[VECTOR_BODY]] ] +; CHECK-NOI8MM-NEXT: [[VEC_PHI1:%.*]] = phi <vscale x 8 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP23:%.*]], [[VECTOR_BODY]] ] ; CHECK-NOI8MM-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]] ; CHECK-NOI8MM-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NOI8MM-NEXT: [[TMP9:%.*]] = shl nuw i64 [[TMP8]], 3 @@ -82,14 +82,14 @@ define i32 @sudot(ptr %a, ptr %b) #0 { ; CHECK-NOI8MM-NEXT: [[TMP19:%.*]] = sext <vscale x 8 x i8> [[WIDE_LOAD4]] to <vscale x 8 x i32> ; CHECK-NOI8MM-NEXT: [[TMP20:%.*]] = mul <vscale x 8 x i32> [[TMP18]], [[TMP11]] ; CHECK-NOI8MM-NEXT: [[TMP21:%.*]] = mul <vscale x 8 x i32> [[TMP19]], [[TMP12]] -; CHECK-NOI8MM-NEXT: [[PARTIAL_REDUCE]] = call <vscale x 2 x i32> @llvm.vector.partial.reduce.add.nxv2i32.nxv8i32(<vscale x 2 x i32> [[VEC_PHI]], <vscale x 8 x i32> [[TMP20]]) -; CHECK-NOI8MM-NEXT: [[PARTIAL_REDUCE5]] = call <vscale x 2 x i32> @llvm.vector.partial.reduce.add.nxv2i32.nxv8i32(<vscale x 2 x i32> [[VEC_PHI1]], <vscale x 8 x i32> [[TMP21]]) +; CHECK-NOI8MM-NEXT: [[TMP22]] = add <vscale x 8 x i32> [[TMP20]], [[VEC_PHI]] +; CHECK-NOI8MM-NEXT: [[TMP23]] = add <vscale x 8 x i32> [[TMP21]], [[VEC_PHI1]] ; CHECK-NOI8MM-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; CHECK-NOI8MM-NEXT: [[TMP24:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-NOI8MM-NEXT: br i1 [[TMP24]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK-NOI8MM: middle.block: -; CHECK-NOI8MM-NEXT: [[BIN_RDX:%.*]] = add <vscale x 2 x i32> [[PARTIAL_REDUCE5]], [[PARTIAL_REDUCE]] -; CHECK-NOI8MM-NEXT: [[TMP23:%.*]] = call i32 @llvm.vector.reduce.add.nxv2i32(<vscale x 2 x i32> [[BIN_RDX]]) +; CHECK-NOI8MM-NEXT: [[BIN_RDX:%.*]] = add <vscale x 8 x i32> [[TMP23]], [[TMP22]] +; CHECK-NOI8MM-NEXT: [[TMP25:%.*]] = call i32 @llvm.vector.reduce.add.nxv8i32(<vscale x 8 x i32> [[BIN_RDX]]) ; CHECK-NOI8MM-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]] ; CHECK-NOI8MM-NEXT: br i1 [[CMP_N]], label [[FOR_EXIT:%.*]], label [[SCALAR_PH:%.*]] ; CHECK-NOI8MM: scalar.ph: @@ -123,40 +123,40 @@ define i32 @usdot(ptr %a, ptr %b) #0 { ; CHECK-NEXT: br label [[VECTOR_PH:%.*]] ; CHECK: vector.ph: ; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 16 +; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 32 ; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]] ; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]] ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 2 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] -; CHECK-NEXT: [[VEC_PHI1:%.*]] = phi <vscale x 2 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ 
[[PARTIAL_REDUCE5:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_PHI1:%.*]] = phi <vscale x 4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE5:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]] ; CHECK-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP9:%.*]] = shl nuw i64 [[TMP8]], 3 +; CHECK-NEXT: [[TMP9:%.*]] = shl nuw i64 [[TMP8]], 4 ; CHECK-NEXT: [[TMP10:%.*]] = getelementptr i8, ptr [[TMP6]], i64 [[TMP9]] -; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x i8>, ptr [[TMP6]], align 1 -; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <vscale x 8 x i8>, ptr [[TMP10]], align 1 -; CHECK-NEXT: [[TMP11:%.*]] = sext <vscale x 8 x i8> [[WIDE_LOAD]] to <vscale x 8 x i32> -; CHECK-NEXT: [[TMP12:%.*]] = sext <vscale x 8 x i8> [[WIDE_LOAD2]] to <vscale x 8 x i32> +; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 16 x i8>, ptr [[TMP6]], align 1 +; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <vscale x 16 x i8>, ptr [[TMP10]], align 1 +; CHECK-NEXT: [[TMP11:%.*]] = sext <vscale x 16 x i8> [[WIDE_LOAD]] to <vscale x 16 x i32> +; CHECK-NEXT: [[TMP7:%.*]] = sext <vscale x 16 x i8> [[WIDE_LOAD2]] to <vscale x 16 x i32> ; CHECK-NEXT: [[TMP13:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX]] ; CHECK-NEXT: [[TMP15:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP16:%.*]] = shl nuw i64 [[TMP15]], 3 +; CHECK-NEXT: [[TMP16:%.*]] = shl nuw i64 [[TMP15]], 4 ; CHECK-NEXT: [[TMP17:%.*]] = getelementptr i8, ptr [[TMP13]], i64 [[TMP16]] -; CHECK-NEXT: [[WIDE_LOAD3:%.*]] = load <vscale x 8 x i8>, ptr [[TMP13]], align 1 -; CHECK-NEXT: [[WIDE_LOAD4:%.*]] = load <vscale x 8 x i8>, ptr [[TMP17]], align 1 -; CHECK-NEXT: [[TMP18:%.*]] = zext <vscale x 8 x i8> [[WIDE_LOAD3]] to <vscale x 8 x i32> -; CHECK-NEXT: [[TMP19:%.*]] = zext <vscale x 8 x i8> [[WIDE_LOAD4]] to <vscale x 8 x i32> -; CHECK-NEXT: [[TMP20:%.*]] = mul <vscale x 8 x i32> [[TMP18]], [[TMP11]] -; CHECK-NEXT: [[TMP21:%.*]] = mul <vscale x 8 x i32> [[TMP19]], [[TMP12]] -; CHECK-NEXT: [[PARTIAL_REDUCE]] = call <vscale x 2 x i32> @llvm.vector.partial.reduce.add.nxv2i32.nxv8i32(<vscale x 2 x i32> [[VEC_PHI]], <vscale x 8 x i32> [[TMP20]]) -; CHECK-NEXT: [[PARTIAL_REDUCE5]] = call <vscale x 2 x i32> @llvm.vector.partial.reduce.add.nxv2i32.nxv8i32(<vscale x 2 x i32> [[VEC_PHI1]], <vscale x 8 x i32> [[TMP21]]) +; CHECK-NEXT: [[WIDE_LOAD3:%.*]] = load <vscale x 16 x i8>, ptr [[TMP13]], align 1 +; CHECK-NEXT: [[WIDE_LOAD4:%.*]] = load <vscale x 16 x i8>, ptr [[TMP17]], align 1 +; CHECK-NEXT: [[TMP12:%.*]] = zext <vscale x 16 x i8> [[WIDE_LOAD3]] to <vscale x 16 x i32> +; CHECK-NEXT: [[TMP18:%.*]] = zext <vscale x 16 x i8> [[WIDE_LOAD4]] to <vscale x 16 x i32> +; CHECK-NEXT: [[TMP14:%.*]] = mul <vscale x 16 x i32> [[TMP12]], [[TMP11]] +; CHECK-NEXT: [[TMP19:%.*]] = mul <vscale x 16 x i32> [[TMP18]], [[TMP7]] +; CHECK-NEXT: [[PARTIAL_REDUCE]] = call <vscale x 4 x i32> @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32(<vscale x 4 x i32> [[VEC_PHI]], <vscale x 16 x i32> [[TMP14]]) +; CHECK-NEXT: [[PARTIAL_REDUCE5]] = call <vscale x 4 x i32> @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32(<vscale x 4 x i32> [[VEC_PHI1]], <vscale x 16 x i32> [[TMP19]]) ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; CHECK-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-NEXT: br i1 [[TMP22]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], 
!llvm.loop [[LOOP4:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: [[BIN_RDX:%.*]] = add <vscale x 2 x i32> [[PARTIAL_REDUCE5]], [[PARTIAL_REDUCE]] -; CHECK-NEXT: [[TMP23:%.*]] = call i32 @llvm.vector.reduce.add.nxv2i32(<vscale x 2 x i32> [[BIN_RDX]]) +; CHECK-NEXT: [[BIN_RDX:%.*]] = add <vscale x 4 x i32> [[PARTIAL_REDUCE5]], [[PARTIAL_REDUCE]] +; CHECK-NEXT: [[TMP20:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[BIN_RDX]]) ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_EXIT:%.*]], label [[SCALAR_PH:%.*]] ; CHECK: scalar.ph: @@ -173,8 +173,8 @@ define i32 @usdot(ptr %a, ptr %b) #0 { ; CHECK-NOI8MM-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-NOI8MM: vector.body: ; CHECK-NOI8MM-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-NOI8MM-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 2 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] -; CHECK-NOI8MM-NEXT: [[VEC_PHI1:%.*]] = phi <vscale x 2 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE5:%.*]], [[VECTOR_BODY]] ] +; CHECK-NOI8MM-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 8 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP22:%.*]], [[VECTOR_BODY]] ] +; CHECK-NOI8MM-NEXT: [[VEC_PHI1:%.*]] = phi <vscale x 8 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP23:%.*]], [[VECTOR_BODY]] ] ; CHECK-NOI8MM-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]] ; CHECK-NOI8MM-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NOI8MM-NEXT: [[TMP9:%.*]] = shl nuw i64 [[TMP8]], 3 @@ -193,14 +193,14 @@ define i32 @usdot(ptr %a, ptr %b) #0 { ; CHECK-NOI8MM-NEXT: [[TMP19:%.*]] = zext <vscale x 8 x i8> [[WIDE_LOAD4]] to <vscale x 8 x i32> ; CHECK-NOI8MM-NEXT: [[TMP20:%.*]] = mul <vscale x 8 x i32> [[TMP18]], [[TMP11]] ; CHECK-NOI8MM-NEXT: [[TMP21:%.*]] = mul <vscale x 8 x i32> [[TMP19]], [[TMP12]] -; CHECK-NOI8MM-NEXT: [[PARTIAL_REDUCE]] = call <vscale x 2 x i32> @llvm.vector.partial.reduce.add.nxv2i32.nxv8i32(<vscale x 2 x i32> [[VEC_PHI]], <vscale x 8 x i32> [[TMP20]]) -; CHECK-NOI8MM-NEXT: [[PARTIAL_REDUCE5]] = call <vscale x 2 x i32> @llvm.vector.partial.reduce.add.nxv2i32.nxv8i32(<vscale x 2 x i32> [[VEC_PHI1]], <vscale x 8 x i32> [[TMP21]]) +; CHECK-NOI8MM-NEXT: [[TMP22]] = add <vscale x 8 x i32> [[TMP20]], [[VEC_PHI]] +; CHECK-NOI8MM-NEXT: [[TMP23]] = add <vscale x 8 x i32> [[TMP21]], [[VEC_PHI1]] ; CHECK-NOI8MM-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; CHECK-NOI8MM-NEXT: [[TMP24:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-NOI8MM-NEXT: br i1 [[TMP24]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK-NOI8MM: middle.block: -; CHECK-NOI8MM-NEXT: [[BIN_RDX:%.*]] = add <vscale x 2 x i32> [[PARTIAL_REDUCE5]], [[PARTIAL_REDUCE]] -; CHECK-NOI8MM-NEXT: [[TMP23:%.*]] = call i32 @llvm.vector.reduce.add.nxv2i32(<vscale x 2 x i32> [[BIN_RDX]]) +; CHECK-NOI8MM-NEXT: [[BIN_RDX:%.*]] = add <vscale x 8 x i32> [[TMP23]], [[TMP22]] +; CHECK-NOI8MM-NEXT: [[TMP25:%.*]] = call i32 @llvm.vector.reduce.add.nxv8i32(<vscale x 8 x i32> [[BIN_RDX]]) ; CHECK-NOI8MM-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]] ; CHECK-NOI8MM-NEXT: br i1 [[CMP_N]], label [[FOR_EXIT:%.*]], label [[SCALAR_PH:%.*]] ; CHECK-NOI8MM: scalar.ph: diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product.ll b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product.ll index 6e11e55..3a88273 100644 --- 
a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product.ll @@ -12,74 +12,62 @@ define i32 @dotp(ptr %a, ptr %b) #0 { ; CHECK-INTERLEAVE1-NEXT: entry: ; CHECK-INTERLEAVE1-NEXT: br label [[VECTOR_PH:%.*]] ; CHECK-INTERLEAVE1: vector.ph: -; CHECK-INTERLEAVE1-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-INTERLEAVE1-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4 -; CHECK-INTERLEAVE1-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]] -; CHECK-INTERLEAVE1-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]] ; CHECK-INTERLEAVE1-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-INTERLEAVE1: vector.body: ; CHECK-INTERLEAVE1-NEXT: [[INDEX1:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT1:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP14:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] ; CHECK-INTERLEAVE1-NEXT: [[TMP16:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX1]] -; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i8>, ptr [[TMP16]], align 1 -; CHECK-INTERLEAVE1-NEXT: [[TMP9:%.*]] = zext <vscale x 4 x i8> [[WIDE_LOAD]] to <vscale x 4 x i32> +; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP16]], align 1 +; CHECK-INTERLEAVE1-NEXT: [[TMP1:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i32> ; CHECK-INTERLEAVE1-NEXT: [[TMP20:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX1]] -; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 4 x i8>, ptr [[TMP20]], align 1 -; CHECK-INTERLEAVE1-NEXT: [[TMP18:%.*]] = zext <vscale x 4 x i8> [[WIDE_LOAD1]] to <vscale x 4 x i32> -; CHECK-INTERLEAVE1-NEXT: [[TMP13:%.*]] = mul <vscale x 4 x i32> [[TMP18]], [[TMP9]] -; CHECK-INTERLEAVE1-NEXT: [[TMP14]] = add <vscale x 4 x i32> [[TMP13]], [[VEC_PHI]] -; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT1]] = add nuw i64 [[INDEX1]], [[TMP3]] -; CHECK-INTERLEAVE1-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_NEXT1]], [[N_VEC]] -; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD1:%.*]] = load <16 x i8>, ptr [[TMP20]], align 1 +; CHECK-INTERLEAVE1-NEXT: [[TMP3:%.*]] = zext <16 x i8> [[WIDE_LOAD1]] to <16 x i32> +; CHECK-INTERLEAVE1-NEXT: [[TMP4:%.*]] = mul <16 x i32> [[TMP3]], [[TMP1]] +; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP4]]) +; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT1]] = add nuw i64 [[INDEX1]], 16 +; CHECK-INTERLEAVE1-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT1]], 1024 +; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK-INTERLEAVE1: middle.block: -; CHECK-INTERLEAVE1-NEXT: [[TMP27:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP14]]) -; CHECK-INTERLEAVE1-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]] -; CHECK-INTERLEAVE1-NEXT: br i1 [[CMP_N]], label [[FOR_EXIT:%.*]], label [[SCALAR_PH:%.*]] -; CHECK-INTERLEAVE1: scalar.ph: +; CHECK-INTERLEAVE1-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE]]) +; CHECK-INTERLEAVE1-NEXT: br label [[FOR_EXIT:%.*]] +; CHECK-INTERLEAVE1: for.exit: +; CHECK-INTERLEAVE1-NEXT: ret i32 [[TMP6]] ; ; 
CHECK-INTERLEAVED-LABEL: define i32 @dotp( ; CHECK-INTERLEAVED-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0:[0-9]+]] { ; CHECK-INTERLEAVED-NEXT: entry: ; CHECK-INTERLEAVED-NEXT: br label [[VECTOR_PH:%.*]] ; CHECK-INTERLEAVED: vector.ph: -; CHECK-INTERLEAVED-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8 -; CHECK-INTERLEAVED-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]] -; CHECK-INTERLEAVED-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]] ; CHECK-INTERLEAVED-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-INTERLEAVED: vector.body: ; CHECK-INTERLEAVED-NEXT: [[INDEX1:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT1:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP23:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[VEC_PHI1:%.*]] = phi <vscale x 4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP24:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVED-NEXT: [[VEC_PHI1:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE5:%.*]], [[VECTOR_BODY]] ] ; CHECK-INTERLEAVED-NEXT: [[TMP20:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX1]] -; CHECK-INTERLEAVED-NEXT: [[TMP14:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-INTERLEAVED-NEXT: [[TMP10:%.*]] = shl nuw i64 [[TMP14]], 2 -; CHECK-INTERLEAVED-NEXT: [[TMP11:%.*]] = getelementptr i8, ptr [[TMP20]], i64 [[TMP10]] -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i8>, ptr [[TMP20]], align 1 -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD2:%.*]] = load <vscale x 4 x i8>, ptr [[TMP11]], align 1 -; CHECK-INTERLEAVED-NEXT: [[TMP12:%.*]] = zext <vscale x 4 x i8> [[WIDE_LOAD]] to <vscale x 4 x i32> -; CHECK-INTERLEAVED-NEXT: [[TMP13:%.*]] = zext <vscale x 4 x i8> [[WIDE_LOAD2]] to <vscale x 4 x i32> +; CHECK-INTERLEAVED-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[TMP20]], i32 16 +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP20]], align 1 +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP1]], align 1 +; CHECK-INTERLEAVED-NEXT: [[TMP2:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i32> +; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = zext <16 x i8> [[WIDE_LOAD2]] to <16 x i32> ; CHECK-INTERLEAVED-NEXT: [[TMP28:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX1]] -; CHECK-INTERLEAVED-NEXT: [[TMP26:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-INTERLEAVED-NEXT: [[TMP27:%.*]] = shl nuw i64 [[TMP26]], 2 -; CHECK-INTERLEAVED-NEXT: [[TMP18:%.*]] = getelementptr i8, ptr [[TMP28]], i64 [[TMP27]] -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD3:%.*]] = load <vscale x 4 x i8>, ptr [[TMP28]], align 1 -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD4:%.*]] = load <vscale x 4 x i8>, ptr [[TMP18]], align 1 -; CHECK-INTERLEAVED-NEXT: [[TMP19:%.*]] = zext <vscale x 4 x i8> [[WIDE_LOAD3]] to <vscale x 4 x i32> -; CHECK-INTERLEAVED-NEXT: [[TMP29:%.*]] = zext <vscale x 4 x i8> [[WIDE_LOAD4]] to <vscale x 4 x i32> -; CHECK-INTERLEAVED-NEXT: [[TMP30:%.*]] = mul <vscale x 4 x i32> [[TMP19]], [[TMP12]] -; CHECK-INTERLEAVED-NEXT: [[TMP22:%.*]] = mul <vscale x 4 x i32> [[TMP29]], [[TMP13]] -; CHECK-INTERLEAVED-NEXT: [[TMP23]] = add <vscale x 4 x i32> [[TMP30]], [[VEC_PHI]] -; CHECK-INTERLEAVED-NEXT: [[TMP24]] = add <vscale x 4 x i32> [[TMP22]], [[VEC_PHI1]] -; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT1]] = add nuw i64 [[INDEX1]], [[TMP3]] -; 
CHECK-INTERLEAVED-NEXT: [[TMP25:%.*]] = icmp eq i64 [[INDEX_NEXT1]], [[N_VEC]] -; CHECK-INTERLEAVED-NEXT: br i1 [[TMP25]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[TMP28]], i32 16 +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD3:%.*]] = load <16 x i8>, ptr [[TMP28]], align 1 +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD4:%.*]] = load <16 x i8>, ptr [[TMP5]], align 1 +; CHECK-INTERLEAVED-NEXT: [[TMP6:%.*]] = zext <16 x i8> [[WIDE_LOAD3]] to <16 x i32> +; CHECK-INTERLEAVED-NEXT: [[TMP7:%.*]] = zext <16 x i8> [[WIDE_LOAD4]] to <16 x i32> +; CHECK-INTERLEAVED-NEXT: [[TMP8:%.*]] = mul <16 x i32> [[TMP6]], [[TMP2]] +; CHECK-INTERLEAVED-NEXT: [[TMP9:%.*]] = mul <16 x i32> [[TMP7]], [[TMP3]] +; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP8]]) +; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE5]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI1]], <16 x i32> [[TMP9]]) +; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT1]] = add nuw i64 [[INDEX1]], 32 +; CHECK-INTERLEAVED-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT1]], 1024 +; CHECK-INTERLEAVED-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK-INTERLEAVED: middle.block: -; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <vscale x 4 x i32> [[TMP24]], [[TMP23]] -; CHECK-INTERLEAVED-NEXT: [[TMP16:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[BIN_RDX]]) -; CHECK-INTERLEAVED-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]] -; CHECK-INTERLEAVED-NEXT: br i1 [[CMP_N]], label [[FOR_EXIT:%.*]], label [[SCALAR_PH:%.*]] -; CHECK-INTERLEAVED: scalar.ph: +; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <4 x i32> [[PARTIAL_REDUCE5]], [[PARTIAL_REDUCE]] +; CHECK-INTERLEAVED-NEXT: [[TMP11:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[BIN_RDX]]) +; CHECK-INTERLEAVED-NEXT: br label [[FOR_EXIT:%.*]] +; CHECK-INTERLEAVED: for.exit: +; CHECK-INTERLEAVED-NEXT: ret i32 [[TMP11]] ; ; CHECK-MAXBW-LABEL: define i32 @dotp( ; CHECK-MAXBW-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0:[0-9]+]] { @@ -139,78 +127,52 @@ define i64 @not_dotp_i8_to_i64_has_neon_dotprod(ptr readonly %a, ptr readonly %b ; CHECK-INTERLEAVE1-NEXT: entry: ; CHECK-INTERLEAVE1-NEXT: br label [[VECTOR_PH:%.*]] ; CHECK-INTERLEAVE1: vector.ph: -; CHECK-INTERLEAVE1-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-INTERLEAVE1-NEXT: [[TMP12:%.*]] = mul nuw i64 [[TMP9]], 2 -; CHECK-INTERLEAVE1-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP12]] -; CHECK-INTERLEAVE1-NEXT: [[TMP10:%.*]] = sub i64 1024, [[N_MOD_VF]] -; CHECK-INTERLEAVE1-NEXT: [[TMP0:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP10]] -; CHECK-INTERLEAVE1-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP10]] ; CHECK-INTERLEAVE1-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-INTERLEAVE1: vector.body: ; CHECK-INTERLEAVE1-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 2 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP15:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI:%.*]] = phi <2 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] ; CHECK-INTERLEAVE1-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]] ; CHECK-INTERLEAVE1-NEXT: [[NEXT_GEP1:%.*]] = getelementptr i8, ptr [[B]], 
i64 [[INDEX]] -; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x i8>, ptr [[NEXT_GEP]], align 1 -; CHECK-INTERLEAVE1-NEXT: [[TMP11:%.*]] = zext <vscale x 2 x i8> [[WIDE_LOAD]] to <vscale x 2 x i64> -; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD2:%.*]] = load <vscale x 2 x i8>, ptr [[NEXT_GEP1]], align 1 -; CHECK-INTERLEAVE1-NEXT: [[TMP13:%.*]] = zext <vscale x 2 x i8> [[WIDE_LOAD2]] to <vscale x 2 x i64> -; CHECK-INTERLEAVE1-NEXT: [[TMP14:%.*]] = mul nuw nsw <vscale x 2 x i64> [[TMP13]], [[TMP11]] -; CHECK-INTERLEAVE1-NEXT: [[TMP15]] = add <vscale x 2 x i64> [[TMP14]], [[VEC_PHI]] -; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP12]] -; CHECK-INTERLEAVE1-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[TMP10]] -; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[NEXT_GEP]], align 1 +; CHECK-INTERLEAVE1-NEXT: [[TMP0:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i64> +; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[NEXT_GEP1]], align 1 +; CHECK-INTERLEAVE1-NEXT: [[TMP1:%.*]] = zext <16 x i8> [[WIDE_LOAD2]] to <16 x i64> +; CHECK-INTERLEAVE1-NEXT: [[TMP2:%.*]] = mul nuw nsw <16 x i64> [[TMP1]], [[TMP0]] +; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE]] = call <2 x i64> @llvm.vector.partial.reduce.add.v2i64.v16i64(<2 x i64> [[VEC_PHI]], <16 x i64> [[TMP2]]) +; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 +; CHECK-INTERLEAVE1-NEXT: [[TMP3:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 +; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP3]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK-INTERLEAVE1: middle.block: -; CHECK-INTERLEAVE1-NEXT: [[TMP17:%.*]] = call i64 @llvm.vector.reduce.add.nxv2i64(<vscale x 2 x i64> [[TMP15]]) -; CHECK-INTERLEAVE1-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[TMP10]] -; CHECK-INTERLEAVE1-NEXT: br i1 [[CMP_N]], label [[FOR_EXIT:%.*]], label [[SCALAR_PH:%.*]] -; CHECK-INTERLEAVE1: scalar.ph: +; CHECK-INTERLEAVE1-NEXT: [[TMP4:%.*]] = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> [[PARTIAL_REDUCE]]) +; CHECK-INTERLEAVE1-NEXT: br label [[FOR_EXIT:%.*]] +; CHECK-INTERLEAVE1: for.exit: +; CHECK-INTERLEAVE1-NEXT: ret i64 [[TMP4]] ; ; CHECK-INTERLEAVED-LABEL: define i64 @not_dotp_i8_to_i64_has_neon_dotprod( ; CHECK-INTERLEAVED-SAME: ptr readonly [[A:%.*]], ptr readonly [[B:%.*]]) #[[ATTR1:[0-9]+]] { ; CHECK-INTERLEAVED-NEXT: entry: ; CHECK-INTERLEAVED-NEXT: br label [[VECTOR_PH:%.*]] ; CHECK-INTERLEAVED: vector.ph: -; CHECK-INTERLEAVED-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-INTERLEAVED-NEXT: [[TMP16:%.*]] = mul nuw i64 [[TMP9]], 4 -; CHECK-INTERLEAVED-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP16]] -; CHECK-INTERLEAVED-NEXT: [[TMP10:%.*]] = sub i64 1024, [[N_MOD_VF]] -; CHECK-INTERLEAVED-NEXT: [[TMP0:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP10]] -; CHECK-INTERLEAVED-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP10]] ; CHECK-INTERLEAVED-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-INTERLEAVED: vector.body: ; CHECK-INTERLEAVED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 2 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP24:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[VEC_PHI1:%.*]] = phi <vscale x 2 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP25:%.*]], [[VECTOR_BODY]] ] +; 
CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi <2 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] ; CHECK-INTERLEAVED-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]] ; CHECK-INTERLEAVED-NEXT: [[NEXT_GEP1:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX]] -; CHECK-INTERLEAVED-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-INTERLEAVED-NEXT: [[TMP12:%.*]] = shl nuw i64 [[TMP11]], 1 -; CHECK-INTERLEAVED-NEXT: [[TMP13:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i64 [[TMP12]] -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x i8>, ptr [[NEXT_GEP]], align 1 -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD3:%.*]] = load <vscale x 2 x i8>, ptr [[TMP13]], align 1 -; CHECK-INTERLEAVED-NEXT: [[TMP14:%.*]] = zext <vscale x 2 x i8> [[WIDE_LOAD]] to <vscale x 2 x i64> -; CHECK-INTERLEAVED-NEXT: [[TMP15:%.*]] = zext <vscale x 2 x i8> [[WIDE_LOAD3]] to <vscale x 2 x i64> -; CHECK-INTERLEAVED-NEXT: [[TMP17:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-INTERLEAVED-NEXT: [[TMP18:%.*]] = shl nuw i64 [[TMP17]], 1 -; CHECK-INTERLEAVED-NEXT: [[TMP19:%.*]] = getelementptr i8, ptr [[NEXT_GEP1]], i64 [[TMP18]] -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD4:%.*]] = load <vscale x 2 x i8>, ptr [[NEXT_GEP1]], align 1 -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD5:%.*]] = load <vscale x 2 x i8>, ptr [[TMP19]], align 1 -; CHECK-INTERLEAVED-NEXT: [[TMP20:%.*]] = zext <vscale x 2 x i8> [[WIDE_LOAD4]] to <vscale x 2 x i64> -; CHECK-INTERLEAVED-NEXT: [[TMP21:%.*]] = zext <vscale x 2 x i8> [[WIDE_LOAD5]] to <vscale x 2 x i64> -; CHECK-INTERLEAVED-NEXT: [[TMP22:%.*]] = mul nuw nsw <vscale x 2 x i64> [[TMP20]], [[TMP14]] -; CHECK-INTERLEAVED-NEXT: [[TMP23:%.*]] = mul nuw nsw <vscale x 2 x i64> [[TMP21]], [[TMP15]] -; CHECK-INTERLEAVED-NEXT: [[TMP24]] = add <vscale x 2 x i64> [[TMP22]], [[VEC_PHI]] -; CHECK-INTERLEAVED-NEXT: [[TMP25]] = add <vscale x 2 x i64> [[TMP23]], [[VEC_PHI1]] -; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP16]] -; CHECK-INTERLEAVED-NEXT: [[TMP26:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[TMP10]] -; CHECK-INTERLEAVED-NEXT: br i1 [[TMP26]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[NEXT_GEP]], align 1 +; CHECK-INTERLEAVED-NEXT: [[TMP0:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i64> +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[NEXT_GEP1]], align 1 +; CHECK-INTERLEAVED-NEXT: [[TMP1:%.*]] = zext <16 x i8> [[WIDE_LOAD2]] to <16 x i64> +; CHECK-INTERLEAVED-NEXT: [[TMP2:%.*]] = mul nuw nsw <16 x i64> [[TMP1]], [[TMP0]] +; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE]] = call <2 x i64> @llvm.vector.partial.reduce.add.v2i64.v16i64(<2 x i64> [[VEC_PHI]], <16 x i64> [[TMP2]]) +; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 +; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 +; CHECK-INTERLEAVED-NEXT: br i1 [[TMP3]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK-INTERLEAVED: middle.block: -; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <vscale x 2 x i64> [[TMP25]], [[TMP24]] -; CHECK-INTERLEAVED-NEXT: [[TMP27:%.*]] = call i64 @llvm.vector.reduce.add.nxv2i64(<vscale x 2 x i64> [[BIN_RDX]]) -; CHECK-INTERLEAVED-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[TMP10]] -; CHECK-INTERLEAVED-NEXT: br i1 [[CMP_N]], label [[FOR_EXIT:%.*]], label [[SCALAR_PH:%.*]] -; CHECK-INTERLEAVED: scalar.ph: +; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = 
call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> [[PARTIAL_REDUCE]]) +; CHECK-INTERLEAVED-NEXT: br label [[FOR_EXIT:%.*]] +; CHECK-INTERLEAVED: for.exit: +; CHECK-INTERLEAVED-NEXT: ret i64 [[TMP4]] ; ; CHECK-MAXBW-LABEL: define i64 @not_dotp_i8_to_i64_has_neon_dotprod( ; CHECK-MAXBW-SAME: ptr readonly [[A:%.*]], ptr readonly [[B:%.*]]) #[[ATTR1:[0-9]+]] { @@ -274,86 +236,66 @@ define i64 @not_dotp_i16_to_i64_has_neon_dotprod(ptr readonly %a, ptr readonly % ; CHECK-INTERLEAVE1-NEXT: entry: ; CHECK-INTERLEAVE1-NEXT: br label [[VECTOR_PH:%.*]] ; CHECK-INTERLEAVE1: vector.ph: -; CHECK-INTERLEAVE1-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-INTERLEAVE1-NEXT: [[TMP12:%.*]] = mul nuw i64 [[TMP11]], 2 -; CHECK-INTERLEAVE1-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP12]] -; CHECK-INTERLEAVE1-NEXT: [[TMP10:%.*]] = sub i64 1024, [[N_MOD_VF]] -; CHECK-INTERLEAVE1-NEXT: [[TMP20:%.*]] = mul i64 [[TMP10]], 2 -; CHECK-INTERLEAVE1-NEXT: [[TMP0:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP20]] -; CHECK-INTERLEAVE1-NEXT: [[TMP8:%.*]] = mul i64 [[TMP10]], 2 -; CHECK-INTERLEAVE1-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP8]] ; CHECK-INTERLEAVE1-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-INTERLEAVE1: vector.body: ; CHECK-INTERLEAVE1-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 2 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP17:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI:%.*]] = phi <2 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] ; CHECK-INTERLEAVE1-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], 2 ; CHECK-INTERLEAVE1-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[A]], i64 [[OFFSET_IDX]] ; CHECK-INTERLEAVE1-NEXT: [[OFFSET_IDX1:%.*]] = mul i64 [[INDEX]], 2 ; CHECK-INTERLEAVE1-NEXT: [[NEXT_GEP2:%.*]] = getelementptr i8, ptr [[B]], i64 [[OFFSET_IDX1]] -; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x i16>, ptr [[NEXT_GEP]], align 2 -; CHECK-INTERLEAVE1-NEXT: [[TMP13:%.*]] = zext <vscale x 2 x i16> [[WIDE_LOAD]] to <vscale x 2 x i64> -; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD3:%.*]] = load <vscale x 2 x i16>, ptr [[NEXT_GEP2]], align 2 -; CHECK-INTERLEAVE1-NEXT: [[TMP15:%.*]] = zext <vscale x 2 x i16> [[WIDE_LOAD3]] to <vscale x 2 x i64> -; CHECK-INTERLEAVE1-NEXT: [[TMP16:%.*]] = mul nuw nsw <vscale x 2 x i64> [[TMP15]], [[TMP13]] -; CHECK-INTERLEAVE1-NEXT: [[TMP17]] = add <vscale x 2 x i64> [[TMP16]], [[VEC_PHI]] -; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP12]] -; CHECK-INTERLEAVE1-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[TMP10]] -; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] +; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i16>, ptr [[NEXT_GEP]], align 2 +; CHECK-INTERLEAVE1-NEXT: [[TMP0:%.*]] = zext <8 x i16> [[WIDE_LOAD]] to <8 x i64> +; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD3:%.*]] = load <8 x i16>, ptr [[NEXT_GEP2]], align 2 +; CHECK-INTERLEAVE1-NEXT: [[TMP1:%.*]] = zext <8 x i16> [[WIDE_LOAD3]] to <8 x i64> +; CHECK-INTERLEAVE1-NEXT: [[TMP2:%.*]] = mul nuw nsw <8 x i64> [[TMP1]], [[TMP0]] +; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE]] = call <2 x i64> @llvm.vector.partial.reduce.add.v2i64.v8i64(<2 x i64> [[VEC_PHI]], <8 x i64> [[TMP2]]) +; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8 +; CHECK-INTERLEAVE1-NEXT: [[TMP3:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 +; 
CHECK-INTERLEAVE1-NEXT: br i1 [[TMP3]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK-INTERLEAVE1: middle.block: -; CHECK-INTERLEAVE1-NEXT: [[TMP19:%.*]] = call i64 @llvm.vector.reduce.add.nxv2i64(<vscale x 2 x i64> [[TMP17]]) -; CHECK-INTERLEAVE1-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[TMP10]] -; CHECK-INTERLEAVE1-NEXT: br i1 [[CMP_N]], label [[FOR_EXIT:%.*]], label [[SCALAR_PH:%.*]] -; CHECK-INTERLEAVE1: scalar.ph: +; CHECK-INTERLEAVE1-NEXT: [[TMP4:%.*]] = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> [[PARTIAL_REDUCE]]) +; CHECK-INTERLEAVE1-NEXT: br label [[FOR_EXIT:%.*]] +; CHECK-INTERLEAVE1: for.exit: +; CHECK-INTERLEAVE1-NEXT: ret i64 [[TMP4]] ; ; CHECK-INTERLEAVED-LABEL: define i64 @not_dotp_i16_to_i64_has_neon_dotprod( ; CHECK-INTERLEAVED-SAME: ptr readonly [[A:%.*]], ptr readonly [[B:%.*]]) #[[ATTR1]] { ; CHECK-INTERLEAVED-NEXT: entry: ; CHECK-INTERLEAVED-NEXT: br label [[VECTOR_PH:%.*]] ; CHECK-INTERLEAVED: vector.ph: -; CHECK-INTERLEAVED-NEXT: [[TMP10:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-INTERLEAVED-NEXT: [[TMP11:%.*]] = mul nuw i64 [[TMP10]], 4 -; CHECK-INTERLEAVED-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP11]] -; CHECK-INTERLEAVED-NEXT: [[TMP15:%.*]] = sub i64 1024, [[N_MOD_VF]] -; CHECK-INTERLEAVED-NEXT: [[TMP6:%.*]] = mul i64 [[TMP15]], 2 -; CHECK-INTERLEAVED-NEXT: [[TMP0:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP6]] -; CHECK-INTERLEAVED-NEXT: [[TMP18:%.*]] = mul i64 [[TMP15]], 2 -; CHECK-INTERLEAVED-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP18]] ; CHECK-INTERLEAVED-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-INTERLEAVED: vector.body: ; CHECK-INTERLEAVED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 2 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP26:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[VEC_PHI1:%.*]] = phi <vscale x 2 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP27:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi <2 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVED-NEXT: [[VEC_PHI1:%.*]] = phi <2 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE7:%.*]], [[VECTOR_BODY]] ] ; CHECK-INTERLEAVED-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], 2 ; CHECK-INTERLEAVED-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[A]], i64 [[OFFSET_IDX]] ; CHECK-INTERLEAVED-NEXT: [[OFFSET_IDX2:%.*]] = mul i64 [[INDEX]], 2 ; CHECK-INTERLEAVED-NEXT: [[NEXT_GEP3:%.*]] = getelementptr i8, ptr [[B]], i64 [[OFFSET_IDX2]] -; CHECK-INTERLEAVED-NEXT: [[TMP13:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-INTERLEAVED-NEXT: [[TMP14:%.*]] = shl nuw i64 [[TMP13]], 1 -; CHECK-INTERLEAVED-NEXT: [[TMP30:%.*]] = getelementptr i16, ptr [[NEXT_GEP]], i64 [[TMP14]] -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x i16>, ptr [[NEXT_GEP]], align 2 -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD4:%.*]] = load <vscale x 2 x i16>, ptr [[TMP30]], align 2 -; CHECK-INTERLEAVED-NEXT: [[TMP16:%.*]] = zext <vscale x 2 x i16> [[WIDE_LOAD]] to <vscale x 2 x i64> -; CHECK-INTERLEAVED-NEXT: [[TMP17:%.*]] = zext <vscale x 2 x i16> [[WIDE_LOAD4]] to <vscale x 2 x i64> -; CHECK-INTERLEAVED-NEXT: [[TMP19:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-INTERLEAVED-NEXT: [[TMP20:%.*]] = shl nuw i64 [[TMP19]], 1 -; CHECK-INTERLEAVED-NEXT: [[TMP21:%.*]] = getelementptr i16, ptr [[NEXT_GEP3]], i64 [[TMP20]] -; CHECK-INTERLEAVED-NEXT: 
[[WIDE_LOAD5:%.*]] = load <vscale x 2 x i16>, ptr [[NEXT_GEP3]], align 2 -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD6:%.*]] = load <vscale x 2 x i16>, ptr [[TMP21]], align 2 -; CHECK-INTERLEAVED-NEXT: [[TMP22:%.*]] = zext <vscale x 2 x i16> [[WIDE_LOAD5]] to <vscale x 2 x i64> -; CHECK-INTERLEAVED-NEXT: [[TMP23:%.*]] = zext <vscale x 2 x i16> [[WIDE_LOAD6]] to <vscale x 2 x i64> -; CHECK-INTERLEAVED-NEXT: [[TMP24:%.*]] = mul nuw nsw <vscale x 2 x i64> [[TMP22]], [[TMP16]] -; CHECK-INTERLEAVED-NEXT: [[TMP25:%.*]] = mul nuw nsw <vscale x 2 x i64> [[TMP23]], [[TMP17]] -; CHECK-INTERLEAVED-NEXT: [[TMP26]] = add <vscale x 2 x i64> [[TMP24]], [[VEC_PHI]] -; CHECK-INTERLEAVED-NEXT: [[TMP27]] = add <vscale x 2 x i64> [[TMP25]], [[VEC_PHI1]] -; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP11]] -; CHECK-INTERLEAVED-NEXT: [[TMP28:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[TMP15]] -; CHECK-INTERLEAVED-NEXT: br i1 [[TMP28]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] +; CHECK-INTERLEAVED-NEXT: [[TMP0:%.*]] = getelementptr i16, ptr [[NEXT_GEP]], i32 8 +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i16>, ptr [[NEXT_GEP]], align 2 +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD4:%.*]] = load <8 x i16>, ptr [[TMP0]], align 2 +; CHECK-INTERLEAVED-NEXT: [[TMP1:%.*]] = zext <8 x i16> [[WIDE_LOAD]] to <8 x i64> +; CHECK-INTERLEAVED-NEXT: [[TMP2:%.*]] = zext <8 x i16> [[WIDE_LOAD4]] to <8 x i64> +; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = getelementptr i16, ptr [[NEXT_GEP3]], i32 8 +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD5:%.*]] = load <8 x i16>, ptr [[NEXT_GEP3]], align 2 +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD6:%.*]] = load <8 x i16>, ptr [[TMP3]], align 2 +; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = zext <8 x i16> [[WIDE_LOAD5]] to <8 x i64> +; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = zext <8 x i16> [[WIDE_LOAD6]] to <8 x i64> +; CHECK-INTERLEAVED-NEXT: [[TMP6:%.*]] = mul nuw nsw <8 x i64> [[TMP4]], [[TMP1]] +; CHECK-INTERLEAVED-NEXT: [[TMP7:%.*]] = mul nuw nsw <8 x i64> [[TMP5]], [[TMP2]] +; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE]] = call <2 x i64> @llvm.vector.partial.reduce.add.v2i64.v8i64(<2 x i64> [[VEC_PHI]], <8 x i64> [[TMP6]]) +; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE7]] = call <2 x i64> @llvm.vector.partial.reduce.add.v2i64.v8i64(<2 x i64> [[VEC_PHI1]], <8 x i64> [[TMP7]]) +; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 +; CHECK-INTERLEAVED-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 +; CHECK-INTERLEAVED-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK-INTERLEAVED: middle.block: -; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <vscale x 2 x i64> [[TMP27]], [[TMP26]] -; CHECK-INTERLEAVED-NEXT: [[TMP29:%.*]] = call i64 @llvm.vector.reduce.add.nxv2i64(<vscale x 2 x i64> [[BIN_RDX]]) -; CHECK-INTERLEAVED-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[TMP15]] -; CHECK-INTERLEAVED-NEXT: br i1 [[CMP_N]], label [[FOR_EXIT:%.*]], label [[SCALAR_PH:%.*]] -; CHECK-INTERLEAVED: scalar.ph: +; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <2 x i64> [[PARTIAL_REDUCE7]], [[PARTIAL_REDUCE]] +; CHECK-INTERLEAVED-NEXT: [[TMP9:%.*]] = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> [[BIN_RDX]]) +; CHECK-INTERLEAVED-NEXT: br label [[FOR_EXIT:%.*]] +; CHECK-INTERLEAVED: for.exit: +; CHECK-INTERLEAVED-NEXT: ret i64 [[TMP9]] ; ; CHECK-MAXBW-LABEL: define i64 @not_dotp_i16_to_i64_has_neon_dotprod( ; CHECK-MAXBW-SAME: ptr readonly [[A:%.*]], ptr readonly [[B:%.*]]) #[[ATTR1]] { @@ -497,7 
+439,7 @@ define i32 @not_dotp_different_types(ptr %a, ptr %b) #0 { ; CHECK-INTERLEAVE1-NEXT: [[TMP69]] = add <16 x i32> [[TMP68]], [[VEC_PHI]] ; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 ; CHECK-INTERLEAVE1-NEXT: [[TMP70:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 -; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP70]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] +; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP70]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; CHECK-INTERLEAVE1: middle.block: ; CHECK-INTERLEAVE1-NEXT: [[TMP71:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[TMP69]]) ; CHECK-INTERLEAVE1-NEXT: br label [[FOR_EXIT:%.*]] @@ -656,7 +598,7 @@ define i32 @not_dotp_different_types(ptr %a, ptr %b) #0 { ; CHECK-INTERLEAVED-NEXT: [[TMP138]] = add <16 x i32> [[TMP136]], [[VEC_PHI1]] ; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32 ; CHECK-INTERLEAVED-NEXT: [[TMP141:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 -; CHECK-INTERLEAVED-NEXT: br i1 [[TMP141]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] +; CHECK-INTERLEAVED-NEXT: br i1 [[TMP141]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; CHECK-INTERLEAVED: middle.block: ; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <16 x i32> [[TMP138]], [[TMP137]] ; CHECK-INTERLEAVED-NEXT: [[TMP142:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[BIN_RDX]]) @@ -803,7 +745,7 @@ define i32 @not_dotp_not_loop_carried(ptr %a, ptr %b) #0 { ; CHECK-INTERLEAVE1-NEXT: [[TMP18:%.*]] = add <vscale x 8 x i32> [[TMP16]], [[TMP17]] ; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; CHECK-INTERLEAVE1-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] +; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; CHECK-INTERLEAVE1: middle.block: ; CHECK-INTERLEAVE1-NEXT: [[TMP20:%.*]] = call i32 @llvm.vscale.i32() ; CHECK-INTERLEAVE1-NEXT: [[TMP21:%.*]] = mul nuw i32 [[TMP20]], 8 @@ -851,7 +793,7 @@ define i32 @not_dotp_not_loop_carried(ptr %a, ptr %b) #0 { ; CHECK-INTERLEAVED-NEXT: [[TMP27:%.*]] = add <vscale x 8 x i32> [[TMP25]], [[TMP26]] ; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; CHECK-INTERLEAVED-NEXT: [[TMP28:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-INTERLEAVED-NEXT: br i1 [[TMP28]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] +; CHECK-INTERLEAVED-NEXT: br i1 [[TMP28]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; CHECK-INTERLEAVED: middle.block: ; CHECK-INTERLEAVED-NEXT: [[TMP29:%.*]] = call i32 @llvm.vscale.i32() ; CHECK-INTERLEAVED-NEXT: [[TMP30:%.*]] = mul nuw i32 [[TMP29]], 8 @@ -952,7 +894,7 @@ define i32 @not_dotp_not_phi(ptr %a, ptr %b) #0 { ; CHECK-INTERLEAVE1-NEXT: [[TMP17:%.*]] = add <vscale x 8 x i32> [[TMP16]], [[TMP15]] ; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; CHECK-INTERLEAVE1-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] +; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; 
CHECK-INTERLEAVE1: middle.block: ; CHECK-INTERLEAVE1-NEXT: [[TMP23:%.*]] = call i32 @llvm.vscale.i32() ; CHECK-INTERLEAVE1-NEXT: [[TMP24:%.*]] = mul nuw i32 [[TMP23]], 8 @@ -990,7 +932,7 @@ define i32 @not_dotp_not_phi(ptr %a, ptr %b) #0 { ; CHECK-INTERLEAVED-NEXT: [[TMP21:%.*]] = add <vscale x 8 x i32> [[TMP30]], [[TMP22]] ; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; CHECK-INTERLEAVED-NEXT: [[TMP24:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-INTERLEAVED-NEXT: br i1 [[TMP24]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] +; CHECK-INTERLEAVED-NEXT: br i1 [[TMP24]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; CHECK-INTERLEAVED: middle.block: ; CHECK-INTERLEAVED-NEXT: [[TMP27:%.*]] = call i32 @llvm.vscale.i32() ; CHECK-INTERLEAVED-NEXT: [[TMP28:%.*]] = mul nuw i32 [[TMP27]], 8 @@ -1058,22 +1000,18 @@ define i32 @dotp_unrolled(i32 %num_out, i64 %num_in, ptr %a, ptr %b) #0 { ; CHECK-INTERLEAVE1-LABEL: define i32 @dotp_unrolled( ; CHECK-INTERLEAVE1-SAME: i32 [[NUM_OUT:%.*]], i64 [[NUM_IN:%.*]], ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-INTERLEAVE1-NEXT: entry: -; CHECK-INTERLEAVE1-NEXT: [[TMP13:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-INTERLEAVE1-NEXT: [[TMP15:%.*]] = shl nuw nsw i64 [[TMP13]], 2 -; CHECK-INTERLEAVE1-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[NUM_IN]], [[TMP15]] +; CHECK-INTERLEAVE1-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[NUM_IN]], 16 ; CHECK-INTERLEAVE1-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK-INTERLEAVE1: vector.ph: -; CHECK-INTERLEAVE1-NEXT: [[TMP16:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-INTERLEAVE1-NEXT: [[TMP18:%.*]] = mul nuw i64 [[TMP16]], 4 -; CHECK-INTERLEAVE1-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[NUM_IN]], [[TMP18]] +; CHECK-INTERLEAVE1-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[NUM_IN]], 16 ; CHECK-INTERLEAVE1-NEXT: [[N_VEC:%.*]] = sub i64 [[NUM_IN]], [[N_MOD_VF]] ; CHECK-INTERLEAVE1-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-INTERLEAVE1: vector.body: ; CHECK-INTERLEAVE1-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP41:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI1:%.*]] = phi <vscale x 4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP35:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI2:%.*]] = phi <vscale x 4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP30:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI3:%.*]] = phi <vscale x 4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP23:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE13:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI1:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE10:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI2:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE7:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI3:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] ; CHECK-INTERLEAVE1-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[INDEX]] ; CHECK-INTERLEAVE1-NEXT: [[TMP2:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[INDEX]] ; CHECK-INTERLEAVE1-NEXT: [[TMP3:%.*]] = 
or disjoint i64 [[INDEX]], 1 @@ -1085,38 +1023,38 @@ define i32 @dotp_unrolled(i32 %num_out, i64 %num_in, ptr %a, ptr %b) #0 { ; CHECK-INTERLEAVE1-NEXT: [[TMP9:%.*]] = or disjoint i64 [[INDEX]], 3 ; CHECK-INTERLEAVE1-NEXT: [[TMP10:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP9]] ; CHECK-INTERLEAVE1-NEXT: [[TMP11:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP9]] -; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i8>, ptr [[TMP1]], align 1 -; CHECK-INTERLEAVE1-NEXT: [[TMP36:%.*]] = sext <vscale x 4 x i8> [[WIDE_LOAD]] to <vscale x 4 x i32> -; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD4:%.*]] = load <vscale x 4 x i8>, ptr [[TMP2]], align 1 -; CHECK-INTERLEAVE1-NEXT: [[TMP21:%.*]] = sext <vscale x 4 x i8> [[WIDE_LOAD4]] to <vscale x 4 x i32> -; CHECK-INTERLEAVE1-NEXT: [[TMP38:%.*]] = mul nsw <vscale x 4 x i32> [[TMP21]], [[TMP36]] -; CHECK-INTERLEAVE1-NEXT: [[TMP23]] = add <vscale x 4 x i32> [[TMP38]], [[VEC_PHI3]] -; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD5:%.*]] = load <vscale x 4 x i8>, ptr [[TMP4]], align 1 -; CHECK-INTERLEAVE1-NEXT: [[TMP25:%.*]] = sext <vscale x 4 x i8> [[WIDE_LOAD5]] to <vscale x 4 x i32> -; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD6:%.*]] = load <vscale x 4 x i8>, ptr [[TMP5]], align 1 -; CHECK-INTERLEAVE1-NEXT: [[TMP42:%.*]] = sext <vscale x 4 x i8> [[WIDE_LOAD6]] to <vscale x 4 x i32> -; CHECK-INTERLEAVE1-NEXT: [[TMP28:%.*]] = mul nsw <vscale x 4 x i32> [[TMP25]], [[TMP42]] -; CHECK-INTERLEAVE1-NEXT: [[TMP30]] = add <vscale x 4 x i32> [[TMP28]], [[VEC_PHI2]] -; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD7:%.*]] = load <vscale x 4 x i8>, ptr [[TMP7]], align 1 -; CHECK-INTERLEAVE1-NEXT: [[TMP31:%.*]] = sext <vscale x 4 x i8> [[WIDE_LOAD7]] to <vscale x 4 x i32> -; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD8:%.*]] = load <vscale x 4 x i8>, ptr [[TMP8]], align 1 -; CHECK-INTERLEAVE1-NEXT: [[TMP33:%.*]] = sext <vscale x 4 x i8> [[WIDE_LOAD8]] to <vscale x 4 x i32> -; CHECK-INTERLEAVE1-NEXT: [[TMP34:%.*]] = mul nsw <vscale x 4 x i32> [[TMP31]], [[TMP33]] -; CHECK-INTERLEAVE1-NEXT: [[TMP35]] = add <vscale x 4 x i32> [[TMP34]], [[VEC_PHI1]] -; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD9:%.*]] = load <vscale x 4 x i8>, ptr [[TMP10]], align 1 -; CHECK-INTERLEAVE1-NEXT: [[TMP37:%.*]] = sext <vscale x 4 x i8> [[WIDE_LOAD9]] to <vscale x 4 x i32> -; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD10:%.*]] = load <vscale x 4 x i8>, ptr [[TMP11]], align 1 -; CHECK-INTERLEAVE1-NEXT: [[TMP39:%.*]] = sext <vscale x 4 x i8> [[WIDE_LOAD10]] to <vscale x 4 x i32> -; CHECK-INTERLEAVE1-NEXT: [[TMP40:%.*]] = mul nsw <vscale x 4 x i32> [[TMP37]], [[TMP39]] -; CHECK-INTERLEAVE1-NEXT: [[TMP41]] = add <vscale x 4 x i32> [[TMP40]], [[VEC_PHI]] -; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP18]] +; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP1]], align 1 +; CHECK-INTERLEAVE1-NEXT: [[TMP23:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32> +; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD4:%.*]] = load <16 x i8>, ptr [[TMP2]], align 1 +; CHECK-INTERLEAVE1-NEXT: [[TMP12:%.*]] = sext <16 x i8> [[WIDE_LOAD4]] to <16 x i32> +; CHECK-INTERLEAVE1-NEXT: [[TMP13:%.*]] = mul nsw <16 x i32> [[TMP12]], [[TMP23]] +; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI3]], <16 x i32> [[TMP13]]) +; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD5:%.*]] = load <16 x i8>, ptr [[TMP4]], align 1 +; CHECK-INTERLEAVE1-NEXT: [[TMP14:%.*]] = sext <16 x i8> [[WIDE_LOAD5]] to <16 x i32> +; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD6:%.*]] = load 
<16 x i8>, ptr [[TMP5]], align 1 +; CHECK-INTERLEAVE1-NEXT: [[TMP15:%.*]] = sext <16 x i8> [[WIDE_LOAD6]] to <16 x i32> +; CHECK-INTERLEAVE1-NEXT: [[TMP16:%.*]] = mul nsw <16 x i32> [[TMP14]], [[TMP15]] +; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE7]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI2]], <16 x i32> [[TMP16]]) +; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD8:%.*]] = load <16 x i8>, ptr [[TMP7]], align 1 +; CHECK-INTERLEAVE1-NEXT: [[TMP17:%.*]] = sext <16 x i8> [[WIDE_LOAD8]] to <16 x i32> +; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD9:%.*]] = load <16 x i8>, ptr [[TMP8]], align 1 +; CHECK-INTERLEAVE1-NEXT: [[TMP18:%.*]] = sext <16 x i8> [[WIDE_LOAD9]] to <16 x i32> +; CHECK-INTERLEAVE1-NEXT: [[TMP19:%.*]] = mul nsw <16 x i32> [[TMP17]], [[TMP18]] +; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE10]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI1]], <16 x i32> [[TMP19]]) +; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD11:%.*]] = load <16 x i8>, ptr [[TMP10]], align 1 +; CHECK-INTERLEAVE1-NEXT: [[TMP20:%.*]] = sext <16 x i8> [[WIDE_LOAD11]] to <16 x i32> +; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD12:%.*]] = load <16 x i8>, ptr [[TMP11]], align 1 +; CHECK-INTERLEAVE1-NEXT: [[TMP21:%.*]] = sext <16 x i8> [[WIDE_LOAD12]] to <16 x i32> +; CHECK-INTERLEAVE1-NEXT: [[TMP22:%.*]] = mul nsw <16 x i32> [[TMP20]], [[TMP21]] +; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE13]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP22]]) +; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 ; CHECK-INTERLEAVE1-NEXT: [[TMP32:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP32]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]] +; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP32]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] ; CHECK-INTERLEAVE1: middle.block: -; CHECK-INTERLEAVE1-NEXT: [[TMP43:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP41]]) -; CHECK-INTERLEAVE1-NEXT: [[TMP44:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP35]]) -; CHECK-INTERLEAVE1-NEXT: [[TMP45:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP30]]) -; CHECK-INTERLEAVE1-NEXT: [[TMP46:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP23]]) +; CHECK-INTERLEAVE1-NEXT: [[TMP24:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE13]]) +; CHECK-INTERLEAVE1-NEXT: [[TMP25:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE10]]) +; CHECK-INTERLEAVE1-NEXT: [[TMP26:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE7]]) +; CHECK-INTERLEAVE1-NEXT: [[TMP27:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE]]) ; CHECK-INTERLEAVE1-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[NUM_IN]], [[N_VEC]] ; CHECK-INTERLEAVE1-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]] ; CHECK-INTERLEAVE1: scalar.ph: @@ -1124,26 +1062,22 @@ define i32 @dotp_unrolled(i32 %num_out, i64 %num_in, ptr %a, ptr %b) #0 { ; CHECK-INTERLEAVED-LABEL: define i32 @dotp_unrolled( ; CHECK-INTERLEAVED-SAME: i32 [[NUM_OUT:%.*]], i64 [[NUM_IN:%.*]], ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-INTERLEAVED-NEXT: entry: -; CHECK-INTERLEAVED-NEXT: [[TMP13:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-INTERLEAVED-NEXT: [[TMP15:%.*]] = shl nuw nsw i64 [[TMP13]], 3 -; CHECK-INTERLEAVED-NEXT: 
[[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[NUM_IN]], [[TMP15]] +; CHECK-INTERLEAVED-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[NUM_IN]], 32 ; CHECK-INTERLEAVED-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK-INTERLEAVED: vector.ph: -; CHECK-INTERLEAVED-NEXT: [[TMP16:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-INTERLEAVED-NEXT: [[TMP18:%.*]] = mul nuw i64 [[TMP16]], 8 -; CHECK-INTERLEAVED-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[NUM_IN]], [[TMP18]] +; CHECK-INTERLEAVED-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[NUM_IN]], 32 ; CHECK-INTERLEAVED-NEXT: [[N_VEC:%.*]] = sub i64 [[NUM_IN]], [[N_MOD_VF]] ; CHECK-INTERLEAVED-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-INTERLEAVED: vector.body: ; CHECK-INTERLEAVED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP80:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[VEC_PHI1:%.*]] = phi <vscale x 4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP81:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[VEC_PHI2:%.*]] = phi <vscale x 4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP64:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[VEC_PHI3:%.*]] = phi <vscale x 4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP65:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[VEC_PHI4:%.*]] = phi <vscale x 4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP48:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[VEC_PHI5:%.*]] = phi <vscale x 4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP49:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[VEC_PHI6:%.*]] = phi <vscale x 4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP50:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[VEC_PHI7:%.*]] = phi <vscale x 4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP33:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE28:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVED-NEXT: [[VEC_PHI1:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE29:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVED-NEXT: [[VEC_PHI2:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE22:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVED-NEXT: [[VEC_PHI3:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE23:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVED-NEXT: [[VEC_PHI4:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE16:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVED-NEXT: [[VEC_PHI5:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE17:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVED-NEXT: [[VEC_PHI6:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVED-NEXT: [[VEC_PHI7:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE11:%.*]], [[VECTOR_BODY]] ] ; CHECK-INTERLEAVED-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[INDEX]] ; CHECK-INTERLEAVED-NEXT: [[TMP2:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[INDEX]] ; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = or disjoint i64 [[INDEX]], 1 @@ -1155,90 +1089,74 @@ define i32 @dotp_unrolled(i32 %num_out, i64 %num_in, ptr %a, ptr %b) #0 { ; CHECK-INTERLEAVED-NEXT: [[TMP9:%.*]] = or disjoint i64 [[INDEX]], 3 ; CHECK-INTERLEAVED-NEXT: [[TMP10:%.*]] 
= getelementptr inbounds i8, ptr [[A]], i64 [[TMP9]] ; CHECK-INTERLEAVED-NEXT: [[TMP11:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP9]] -; CHECK-INTERLEAVED-NEXT: [[TMP56:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-INTERLEAVED-NEXT: [[TMP20:%.*]] = shl nuw i64 [[TMP56]], 2 -; CHECK-INTERLEAVED-NEXT: [[TMP21:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i64 [[TMP20]] -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i8>, ptr [[TMP1]], align 1 -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD8:%.*]] = load <vscale x 4 x i8>, ptr [[TMP21]], align 1 -; CHECK-INTERLEAVED-NEXT: [[TMP66:%.*]] = sext <vscale x 4 x i8> [[WIDE_LOAD]] to <vscale x 4 x i32> -; CHECK-INTERLEAVED-NEXT: [[TMP23:%.*]] = sext <vscale x 4 x i8> [[WIDE_LOAD8]] to <vscale x 4 x i32> -; CHECK-INTERLEAVED-NEXT: [[TMP25:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-INTERLEAVED-NEXT: [[TMP26:%.*]] = shl nuw i64 [[TMP25]], 2 -; CHECK-INTERLEAVED-NEXT: [[TMP72:%.*]] = getelementptr inbounds i8, ptr [[TMP2]], i64 [[TMP26]] -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD9:%.*]] = load <vscale x 4 x i8>, ptr [[TMP2]], align 1 -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD10:%.*]] = load <vscale x 4 x i8>, ptr [[TMP72]], align 1 -; CHECK-INTERLEAVED-NEXT: [[TMP28:%.*]] = sext <vscale x 4 x i8> [[WIDE_LOAD9]] to <vscale x 4 x i32> -; CHECK-INTERLEAVED-NEXT: [[TMP82:%.*]] = sext <vscale x 4 x i8> [[WIDE_LOAD10]] to <vscale x 4 x i32> -; CHECK-INTERLEAVED-NEXT: [[TMP30:%.*]] = mul nsw <vscale x 4 x i32> [[TMP28]], [[TMP66]] -; CHECK-INTERLEAVED-NEXT: [[TMP31:%.*]] = mul nsw <vscale x 4 x i32> [[TMP82]], [[TMP23]] -; CHECK-INTERLEAVED-NEXT: [[TMP50]] = add <vscale x 4 x i32> [[TMP30]], [[VEC_PHI6]] -; CHECK-INTERLEAVED-NEXT: [[TMP33]] = add <vscale x 4 x i32> [[TMP31]], [[VEC_PHI7]] -; CHECK-INTERLEAVED-NEXT: [[TMP35:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-INTERLEAVED-NEXT: [[TMP36:%.*]] = shl nuw i64 [[TMP35]], 2 -; CHECK-INTERLEAVED-NEXT: [[TMP37:%.*]] = getelementptr inbounds i8, ptr [[TMP4]], i64 [[TMP36]] -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD11:%.*]] = load <vscale x 4 x i8>, ptr [[TMP4]], align 1 -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD12:%.*]] = load <vscale x 4 x i8>, ptr [[TMP37]], align 1 -; CHECK-INTERLEAVED-NEXT: [[TMP38:%.*]] = sext <vscale x 4 x i8> [[WIDE_LOAD11]] to <vscale x 4 x i32> -; CHECK-INTERLEAVED-NEXT: [[TMP39:%.*]] = sext <vscale x 4 x i8> [[WIDE_LOAD12]] to <vscale x 4 x i32> -; CHECK-INTERLEAVED-NEXT: [[TMP41:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-INTERLEAVED-NEXT: [[TMP42:%.*]] = shl nuw i64 [[TMP41]], 2 -; CHECK-INTERLEAVED-NEXT: [[TMP43:%.*]] = getelementptr inbounds i8, ptr [[TMP5]], i64 [[TMP42]] -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD13:%.*]] = load <vscale x 4 x i8>, ptr [[TMP5]], align 1 -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD14:%.*]] = load <vscale x 4 x i8>, ptr [[TMP43]], align 1 -; CHECK-INTERLEAVED-NEXT: [[TMP44:%.*]] = sext <vscale x 4 x i8> [[WIDE_LOAD13]] to <vscale x 4 x i32> -; CHECK-INTERLEAVED-NEXT: [[TMP45:%.*]] = sext <vscale x 4 x i8> [[WIDE_LOAD14]] to <vscale x 4 x i32> -; CHECK-INTERLEAVED-NEXT: [[TMP46:%.*]] = mul nsw <vscale x 4 x i32> [[TMP38]], [[TMP44]] -; CHECK-INTERLEAVED-NEXT: [[TMP47:%.*]] = mul nsw <vscale x 4 x i32> [[TMP39]], [[TMP45]] -; CHECK-INTERLEAVED-NEXT: [[TMP48]] = add <vscale x 4 x i32> [[TMP46]], [[VEC_PHI4]] -; CHECK-INTERLEAVED-NEXT: [[TMP49]] = add <vscale x 4 x i32> [[TMP47]], [[VEC_PHI5]] -; CHECK-INTERLEAVED-NEXT: [[TMP51:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-INTERLEAVED-NEXT: [[TMP52:%.*]] = shl nuw i64 [[TMP51]], 2 -; 
CHECK-INTERLEAVED-NEXT: [[TMP53:%.*]] = getelementptr inbounds i8, ptr [[TMP7]], i64 [[TMP52]] -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD15:%.*]] = load <vscale x 4 x i8>, ptr [[TMP7]], align 1 -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD16:%.*]] = load <vscale x 4 x i8>, ptr [[TMP53]], align 1 -; CHECK-INTERLEAVED-NEXT: [[TMP54:%.*]] = sext <vscale x 4 x i8> [[WIDE_LOAD15]] to <vscale x 4 x i32> -; CHECK-INTERLEAVED-NEXT: [[TMP55:%.*]] = sext <vscale x 4 x i8> [[WIDE_LOAD16]] to <vscale x 4 x i32> -; CHECK-INTERLEAVED-NEXT: [[TMP57:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-INTERLEAVED-NEXT: [[TMP58:%.*]] = shl nuw i64 [[TMP57]], 2 -; CHECK-INTERLEAVED-NEXT: [[TMP59:%.*]] = getelementptr inbounds i8, ptr [[TMP8]], i64 [[TMP58]] -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD17:%.*]] = load <vscale x 4 x i8>, ptr [[TMP8]], align 1 -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD18:%.*]] = load <vscale x 4 x i8>, ptr [[TMP59]], align 1 -; CHECK-INTERLEAVED-NEXT: [[TMP60:%.*]] = sext <vscale x 4 x i8> [[WIDE_LOAD17]] to <vscale x 4 x i32> -; CHECK-INTERLEAVED-NEXT: [[TMP61:%.*]] = sext <vscale x 4 x i8> [[WIDE_LOAD18]] to <vscale x 4 x i32> -; CHECK-INTERLEAVED-NEXT: [[TMP62:%.*]] = mul nsw <vscale x 4 x i32> [[TMP54]], [[TMP60]] -; CHECK-INTERLEAVED-NEXT: [[TMP63:%.*]] = mul nsw <vscale x 4 x i32> [[TMP55]], [[TMP61]] -; CHECK-INTERLEAVED-NEXT: [[TMP64]] = add <vscale x 4 x i32> [[TMP62]], [[VEC_PHI2]] -; CHECK-INTERLEAVED-NEXT: [[TMP65]] = add <vscale x 4 x i32> [[TMP63]], [[VEC_PHI3]] -; CHECK-INTERLEAVED-NEXT: [[TMP67:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-INTERLEAVED-NEXT: [[TMP68:%.*]] = shl nuw i64 [[TMP67]], 2 -; CHECK-INTERLEAVED-NEXT: [[TMP69:%.*]] = getelementptr inbounds i8, ptr [[TMP10]], i64 [[TMP68]] -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD19:%.*]] = load <vscale x 4 x i8>, ptr [[TMP10]], align 1 -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD20:%.*]] = load <vscale x 4 x i8>, ptr [[TMP69]], align 1 -; CHECK-INTERLEAVED-NEXT: [[TMP70:%.*]] = sext <vscale x 4 x i8> [[WIDE_LOAD19]] to <vscale x 4 x i32> -; CHECK-INTERLEAVED-NEXT: [[TMP71:%.*]] = sext <vscale x 4 x i8> [[WIDE_LOAD20]] to <vscale x 4 x i32> -; CHECK-INTERLEAVED-NEXT: [[TMP73:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-INTERLEAVED-NEXT: [[TMP74:%.*]] = shl nuw i64 [[TMP73]], 2 -; CHECK-INTERLEAVED-NEXT: [[TMP75:%.*]] = getelementptr inbounds i8, ptr [[TMP11]], i64 [[TMP74]] -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD21:%.*]] = load <vscale x 4 x i8>, ptr [[TMP11]], align 1 -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD22:%.*]] = load <vscale x 4 x i8>, ptr [[TMP75]], align 1 -; CHECK-INTERLEAVED-NEXT: [[TMP76:%.*]] = sext <vscale x 4 x i8> [[WIDE_LOAD21]] to <vscale x 4 x i32> -; CHECK-INTERLEAVED-NEXT: [[TMP77:%.*]] = sext <vscale x 4 x i8> [[WIDE_LOAD22]] to <vscale x 4 x i32> -; CHECK-INTERLEAVED-NEXT: [[TMP78:%.*]] = mul nsw <vscale x 4 x i32> [[TMP70]], [[TMP76]] -; CHECK-INTERLEAVED-NEXT: [[TMP79:%.*]] = mul nsw <vscale x 4 x i32> [[TMP71]], [[TMP77]] -; CHECK-INTERLEAVED-NEXT: [[TMP80]] = add <vscale x 4 x i32> [[TMP78]], [[VEC_PHI]] -; CHECK-INTERLEAVED-NEXT: [[TMP81]] = add <vscale x 4 x i32> [[TMP79]], [[VEC_PHI1]] -; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP18]] +; CHECK-INTERLEAVED-NEXT: [[TMP43:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i32 16 +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP1]], align 1 +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD8:%.*]] = load <16 x i8>, ptr [[TMP43]], align 1 +; CHECK-INTERLEAVED-NEXT: [[TMP12:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32> +; 
CHECK-INTERLEAVED-NEXT: [[TMP13:%.*]] = sext <16 x i8> [[WIDE_LOAD8]] to <16 x i32> +; CHECK-INTERLEAVED-NEXT: [[TMP14:%.*]] = getelementptr inbounds i8, ptr [[TMP2]], i32 16 +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD9:%.*]] = load <16 x i8>, ptr [[TMP2]], align 1 +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD10:%.*]] = load <16 x i8>, ptr [[TMP14]], align 1 +; CHECK-INTERLEAVED-NEXT: [[TMP15:%.*]] = sext <16 x i8> [[WIDE_LOAD9]] to <16 x i32> +; CHECK-INTERLEAVED-NEXT: [[TMP16:%.*]] = sext <16 x i8> [[WIDE_LOAD10]] to <16 x i32> +; CHECK-INTERLEAVED-NEXT: [[TMP17:%.*]] = mul nsw <16 x i32> [[TMP15]], [[TMP12]] +; CHECK-INTERLEAVED-NEXT: [[TMP18:%.*]] = mul nsw <16 x i32> [[TMP16]], [[TMP13]] +; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI6]], <16 x i32> [[TMP17]]) +; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE11]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI7]], <16 x i32> [[TMP18]]) +; CHECK-INTERLEAVED-NEXT: [[TMP19:%.*]] = getelementptr inbounds i8, ptr [[TMP4]], i32 16 +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD12:%.*]] = load <16 x i8>, ptr [[TMP4]], align 1 +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD13:%.*]] = load <16 x i8>, ptr [[TMP19]], align 1 +; CHECK-INTERLEAVED-NEXT: [[TMP20:%.*]] = sext <16 x i8> [[WIDE_LOAD12]] to <16 x i32> +; CHECK-INTERLEAVED-NEXT: [[TMP21:%.*]] = sext <16 x i8> [[WIDE_LOAD13]] to <16 x i32> +; CHECK-INTERLEAVED-NEXT: [[TMP22:%.*]] = getelementptr inbounds i8, ptr [[TMP5]], i32 16 +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD14:%.*]] = load <16 x i8>, ptr [[TMP5]], align 1 +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD15:%.*]] = load <16 x i8>, ptr [[TMP22]], align 1 +; CHECK-INTERLEAVED-NEXT: [[TMP23:%.*]] = sext <16 x i8> [[WIDE_LOAD14]] to <16 x i32> +; CHECK-INTERLEAVED-NEXT: [[TMP24:%.*]] = sext <16 x i8> [[WIDE_LOAD15]] to <16 x i32> +; CHECK-INTERLEAVED-NEXT: [[TMP25:%.*]] = mul nsw <16 x i32> [[TMP20]], [[TMP23]] +; CHECK-INTERLEAVED-NEXT: [[TMP26:%.*]] = mul nsw <16 x i32> [[TMP21]], [[TMP24]] +; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE16]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI4]], <16 x i32> [[TMP25]]) +; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE17]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI5]], <16 x i32> [[TMP26]]) +; CHECK-INTERLEAVED-NEXT: [[TMP27:%.*]] = getelementptr inbounds i8, ptr [[TMP7]], i32 16 +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD18:%.*]] = load <16 x i8>, ptr [[TMP7]], align 1 +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD19:%.*]] = load <16 x i8>, ptr [[TMP27]], align 1 +; CHECK-INTERLEAVED-NEXT: [[TMP28:%.*]] = sext <16 x i8> [[WIDE_LOAD18]] to <16 x i32> +; CHECK-INTERLEAVED-NEXT: [[TMP29:%.*]] = sext <16 x i8> [[WIDE_LOAD19]] to <16 x i32> +; CHECK-INTERLEAVED-NEXT: [[TMP30:%.*]] = getelementptr inbounds i8, ptr [[TMP8]], i32 16 +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD20:%.*]] = load <16 x i8>, ptr [[TMP8]], align 1 +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD21:%.*]] = load <16 x i8>, ptr [[TMP30]], align 1 +; CHECK-INTERLEAVED-NEXT: [[TMP31:%.*]] = sext <16 x i8> [[WIDE_LOAD20]] to <16 x i32> +; CHECK-INTERLEAVED-NEXT: [[TMP48:%.*]] = sext <16 x i8> [[WIDE_LOAD21]] to <16 x i32> +; CHECK-INTERLEAVED-NEXT: [[TMP33:%.*]] = mul nsw <16 x i32> [[TMP28]], [[TMP31]] +; CHECK-INTERLEAVED-NEXT: [[TMP34:%.*]] = mul nsw <16 x i32> [[TMP29]], [[TMP48]] +; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE22]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI2]], <16 x 
i32> [[TMP33]]) +; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE23]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI3]], <16 x i32> [[TMP34]]) +; CHECK-INTERLEAVED-NEXT: [[TMP35:%.*]] = getelementptr inbounds i8, ptr [[TMP10]], i32 16 +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD24:%.*]] = load <16 x i8>, ptr [[TMP10]], align 1 +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD25:%.*]] = load <16 x i8>, ptr [[TMP35]], align 1 +; CHECK-INTERLEAVED-NEXT: [[TMP36:%.*]] = sext <16 x i8> [[WIDE_LOAD24]] to <16 x i32> +; CHECK-INTERLEAVED-NEXT: [[TMP37:%.*]] = sext <16 x i8> [[WIDE_LOAD25]] to <16 x i32> +; CHECK-INTERLEAVED-NEXT: [[TMP38:%.*]] = getelementptr inbounds i8, ptr [[TMP11]], i32 16 +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD26:%.*]] = load <16 x i8>, ptr [[TMP11]], align 1 +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD27:%.*]] = load <16 x i8>, ptr [[TMP38]], align 1 +; CHECK-INTERLEAVED-NEXT: [[TMP39:%.*]] = sext <16 x i8> [[WIDE_LOAD26]] to <16 x i32> +; CHECK-INTERLEAVED-NEXT: [[TMP40:%.*]] = sext <16 x i8> [[WIDE_LOAD27]] to <16 x i32> +; CHECK-INTERLEAVED-NEXT: [[TMP41:%.*]] = mul nsw <16 x i32> [[TMP36]], [[TMP39]] +; CHECK-INTERLEAVED-NEXT: [[TMP42:%.*]] = mul nsw <16 x i32> [[TMP37]], [[TMP40]] +; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE28]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP41]]) +; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE29]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI1]], <16 x i32> [[TMP42]]) +; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32 ; CHECK-INTERLEAVED-NEXT: [[TMP32:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-INTERLEAVED-NEXT: br i1 [[TMP32]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]] +; CHECK-INTERLEAVED-NEXT: br i1 [[TMP32]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] ; CHECK-INTERLEAVED: middle.block: -; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <vscale x 4 x i32> [[TMP81]], [[TMP80]] -; CHECK-INTERLEAVED-NEXT: [[TMP83:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[BIN_RDX]]) -; CHECK-INTERLEAVED-NEXT: [[BIN_RDX23:%.*]] = add <vscale x 4 x i32> [[TMP65]], [[TMP64]] -; CHECK-INTERLEAVED-NEXT: [[TMP84:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[BIN_RDX23]]) -; CHECK-INTERLEAVED-NEXT: [[BIN_RDX24:%.*]] = add <vscale x 4 x i32> [[TMP49]], [[TMP48]] -; CHECK-INTERLEAVED-NEXT: [[TMP85:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[BIN_RDX24]]) -; CHECK-INTERLEAVED-NEXT: [[BIN_RDX25:%.*]] = add <vscale x 4 x i32> [[TMP33]], [[TMP50]] -; CHECK-INTERLEAVED-NEXT: [[TMP86:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[BIN_RDX25]]) +; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <4 x i32> [[PARTIAL_REDUCE29]], [[PARTIAL_REDUCE28]] +; CHECK-INTERLEAVED-NEXT: [[TMP44:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[BIN_RDX]]) +; CHECK-INTERLEAVED-NEXT: [[BIN_RDX30:%.*]] = add <4 x i32> [[PARTIAL_REDUCE23]], [[PARTIAL_REDUCE22]] +; CHECK-INTERLEAVED-NEXT: [[TMP45:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[BIN_RDX30]]) +; CHECK-INTERLEAVED-NEXT: [[BIN_RDX31:%.*]] = add <4 x i32> [[PARTIAL_REDUCE17]], [[PARTIAL_REDUCE16]] +; CHECK-INTERLEAVED-NEXT: [[TMP46:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[BIN_RDX31]]) +; CHECK-INTERLEAVED-NEXT: [[BIN_RDX32:%.*]] = add <4 x i32> [[PARTIAL_REDUCE11]], [[PARTIAL_REDUCE]] +; 
CHECK-INTERLEAVED-NEXT: [[TMP47:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[BIN_RDX32]]) ; CHECK-INTERLEAVED-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[NUM_IN]], [[N_VEC]] ; CHECK-INTERLEAVED-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]] ; CHECK-INTERLEAVED: scalar.ph: @@ -1396,7 +1314,7 @@ define i32 @dotp_predicated(i64 %N, ptr %a, ptr %b) #0 { ; CHECK-INTERLEAVE1-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX]], i64 [[TMP9]]) ; CHECK-INTERLEAVE1-NEXT: [[TMP20:%.*]] = extractelement <vscale x 4 x i1> [[ACTIVE_LANE_MASK_NEXT]], i32 0 ; CHECK-INTERLEAVE1-NEXT: [[TMP21:%.*]] = xor i1 [[TMP20]], true -; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]] +; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] ; CHECK-INTERLEAVE1: middle.block: ; CHECK-INTERLEAVE1-NEXT: [[TMP22:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP19]]) ; CHECK-INTERLEAVE1-NEXT: br label [[EXIT:%.*]] @@ -1434,7 +1352,7 @@ define i32 @dotp_predicated(i64 %N, ptr %a, ptr %b) #0 { ; CHECK-INTERLEAVED-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX]], i64 [[TMP9]]) ; CHECK-INTERLEAVED-NEXT: [[TMP20:%.*]] = extractelement <vscale x 4 x i1> [[ACTIVE_LANE_MASK_NEXT]], i32 0 ; CHECK-INTERLEAVED-NEXT: [[TMP21:%.*]] = xor i1 [[TMP20]], true -; CHECK-INTERLEAVED-NEXT: br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]] +; CHECK-INTERLEAVED-NEXT: br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] ; CHECK-INTERLEAVED: middle.block: ; CHECK-INTERLEAVED-NEXT: [[TMP22:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP19]]) ; CHECK-INTERLEAVED-NEXT: br label [[EXIT:%.*]] @@ -1525,7 +1443,7 @@ define i32 @not_dotp_extend_user(ptr %a, ptr %b) #0 { ; CHECK-INTERLEAVE1-NEXT: [[TMP14]] = add <vscale x 4 x i32> [[TMP13]], [[VEC_PHI]] ; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP10]] ; CHECK-INTERLEAVE1-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] +; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] ; CHECK-INTERLEAVE1: middle.block: ; CHECK-INTERLEAVE1-NEXT: [[TMP16:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP14]]) ; CHECK-INTERLEAVE1-NEXT: [[TMP17:%.*]] = call i32 @llvm.vscale.i32() @@ -1572,7 +1490,7 @@ define i32 @not_dotp_extend_user(ptr %a, ptr %b) #0 { ; CHECK-INTERLEAVED-NEXT: [[TMP24]] = add <vscale x 4 x i32> [[TMP22]], [[VEC_PHI1]] ; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP7]] ; CHECK-INTERLEAVED-NEXT: [[TMP25:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-INTERLEAVED-NEXT: br i1 [[TMP25]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] +; CHECK-INTERLEAVED-NEXT: br i1 [[TMP25]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] ; CHECK-INTERLEAVED: middle.block: ; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <vscale x 4 x i32> [[TMP24]], [[TMP23]] ; CHECK-INTERLEAVED-NEXT: [[TMP26:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> 
[[BIN_RDX]]) @@ -1607,7 +1525,7 @@ define i32 @not_dotp_extend_user(ptr %a, ptr %b) #0 { ; CHECK-MAXBW-NEXT: [[TMP24]] = add <vscale x 8 x i32> [[TMP22]], [[VEC_PHI1]] ; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; CHECK-MAXBW-NEXT: [[TMP25:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-MAXBW-NEXT: br i1 [[TMP25]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] +; CHECK-MAXBW-NEXT: br i1 [[TMP25]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]] ; CHECK-MAXBW: middle.block: ; CHECK-MAXBW-NEXT: [[TMP16:%.*]] = call i32 @llvm.vector.reduce.add.nxv8i32(<vscale x 8 x i32> [[TMP24]]) ; CHECK-MAXBW-NEXT: [[TMP17:%.*]] = call i32 @llvm.vscale.i32() @@ -1666,7 +1584,7 @@ define i64 @dotp_cost_disagreement(ptr %a, ptr %b) #0 { ; CHECK-INTERLEAVE1-NEXT: [[TMP15]] = add <vscale x 2 x i64> [[VEC_PHI]], [[TMP14]] ; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; CHECK-INTERLEAVE1-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]] +; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] ; CHECK-INTERLEAVE1: middle.block: ; CHECK-INTERLEAVE1-NEXT: [[TMP17:%.*]] = call i64 @llvm.vector.reduce.add.nxv2i64(<vscale x 2 x i64> [[TMP15]]) ; CHECK-INTERLEAVE1-NEXT: [[CMP_N:%.*]] = icmp eq i64 41, [[N_VEC]] @@ -1713,7 +1631,7 @@ define i64 @dotp_cost_disagreement(ptr %a, ptr %b) #0 { ; CHECK-INTERLEAVED-NEXT: [[TMP25]] = add <vscale x 2 x i64> [[VEC_PHI1]], [[TMP23]] ; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; CHECK-INTERLEAVED-NEXT: [[TMP26:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-INTERLEAVED-NEXT: br i1 [[TMP26]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]] +; CHECK-INTERLEAVED-NEXT: br i1 [[TMP26]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] ; CHECK-INTERLEAVED: middle.block: ; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <vscale x 2 x i64> [[TMP25]], [[TMP24]] ; CHECK-INTERLEAVED-NEXT: [[TMP27:%.*]] = call i64 @llvm.vector.reduce.add.nxv2i64(<vscale x 2 x i64> [[BIN_RDX]]) @@ -1748,7 +1666,7 @@ define i64 @dotp_cost_disagreement(ptr %a, ptr %b) #0 { ; CHECK-MAXBW-NEXT: [[TMP14]] = add <vscale x 8 x i64> [[VEC_PHI]], [[TMP13]] ; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; CHECK-MAXBW-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-MAXBW-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]] +; CHECK-MAXBW-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]] ; CHECK-MAXBW: middle.block: ; CHECK-MAXBW-NEXT: [[TMP12:%.*]] = call i64 @llvm.vector.reduce.add.nxv8i64(<vscale x 8 x i64> [[TMP14]]) ; CHECK-MAXBW-NEXT: [[CMP_N:%.*]] = icmp eq i64 41, [[N_VEC]] @@ -1866,7 +1784,7 @@ define void @not_dotp_not_phi2(ptr %matrix, i32 %n) #0 { ; CHECK-INTERLEAVED-NEXT: [[TMP23]] = add i32 [[TMP21]], [[TMP15]] ; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 ; CHECK-INTERLEAVED-NEXT: [[TMP24:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-INTERLEAVED-NEXT: br i1 [[TMP24]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]] +; CHECK-INTERLEAVED-NEXT: br i1 [[TMP24]], label 
[[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]] ; CHECK-INTERLEAVED: middle.block: ; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add i32 [[TMP23]], [[TMP22]] ; CHECK-INTERLEAVED-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP0]], [[N_VEC]] @@ -1978,7 +1896,7 @@ define i64 @not_dotp_ext_outside_plan(ptr %a, i16 %b, i64 %n) #0 { ; CHECK-INTERLEAVE1-NEXT: [[TMP5]] = add <8 x i64> [[TMP4]], [[VEC_PHI]] ; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8 ; CHECK-INTERLEAVE1-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]] +; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]] ; CHECK-INTERLEAVE1: middle.block: ; CHECK-INTERLEAVE1-NEXT: [[TMP7:%.*]] = call i64 @llvm.vector.reduce.add.v8i64(<8 x i64> [[TMP5]]) ; CHECK-INTERLEAVE1-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] @@ -2016,7 +1934,7 @@ define i64 @not_dotp_ext_outside_plan(ptr %a, i16 %b, i64 %n) #0 { ; CHECK-INTERLEAVED-NEXT: [[TMP9]] = add <8 x i64> [[TMP7]], [[VEC_PHI1]] ; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 ; CHECK-INTERLEAVED-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-INTERLEAVED-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]] +; CHECK-INTERLEAVED-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]] ; CHECK-INTERLEAVED: middle.block: ; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <8 x i64> [[TMP9]], [[TMP8]] ; CHECK-INTERLEAVED-NEXT: [[TMP11:%.*]] = call i64 @llvm.vector.reduce.add.v8i64(<8 x i64> [[BIN_RDX]]) @@ -2053,7 +1971,7 @@ define i64 @not_dotp_ext_outside_plan(ptr %a, i16 %b, i64 %n) #0 { ; CHECK-MAXBW-NEXT: [[TMP11]] = add <vscale x 4 x i64> [[TMP10]], [[VEC_PHI]] ; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; CHECK-MAXBW-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-MAXBW-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]] +; CHECK-MAXBW-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]] ; CHECK-MAXBW: middle.block: ; CHECK-MAXBW-NEXT: [[TMP13:%.*]] = call i64 @llvm.vector.reduce.add.nxv4i64(<vscale x 4 x i64> [[TMP11]]) ; CHECK-MAXBW-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] @@ -2111,7 +2029,7 @@ define i64 @not_dotp_ext_outside_plan2(ptr %a, i16 %b, i64 %n) #0 { ; CHECK-INTERLEAVE1-NEXT: [[TMP5]] = add <8 x i64> [[TMP4]], [[VEC_PHI]] ; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8 ; CHECK-INTERLEAVE1-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]] +; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]] ; CHECK-INTERLEAVE1: middle.block: ; CHECK-INTERLEAVE1-NEXT: [[TMP7:%.*]] = call i64 @llvm.vector.reduce.add.v8i64(<8 x i64> [[TMP5]]) ; CHECK-INTERLEAVE1-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] @@ -2149,7 +2067,7 @@ define i64 @not_dotp_ext_outside_plan2(ptr %a, i16 %b, i64 %n) #0 { ; CHECK-INTERLEAVED-NEXT: [[TMP9]] = add <8 x i64> [[TMP7]], [[VEC_PHI1]] ; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 ; 
CHECK-INTERLEAVED-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-INTERLEAVED-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP24:![0-9]+]] +; CHECK-INTERLEAVED-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]] ; CHECK-INTERLEAVED: middle.block: ; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <8 x i64> [[TMP9]], [[TMP8]] ; CHECK-INTERLEAVED-NEXT: [[TMP11:%.*]] = call i64 @llvm.vector.reduce.add.v8i64(<8 x i64> [[BIN_RDX]]) @@ -2186,7 +2104,7 @@ define i64 @not_dotp_ext_outside_plan2(ptr %a, i16 %b, i64 %n) #0 { ; CHECK-MAXBW-NEXT: [[TMP11]] = add <vscale x 4 x i64> [[TMP10]], [[VEC_PHI]] ; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; CHECK-MAXBW-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-MAXBW-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]] +; CHECK-MAXBW-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP23:![0-9]+]] ; CHECK-MAXBW: middle.block: ; CHECK-MAXBW-NEXT: [[TMP13:%.*]] = call i64 @llvm.vector.reduce.add.nxv4i64(<vscale x 4 x i64> [[TMP11]]) ; CHECK-MAXBW-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] @@ -2226,36 +2144,32 @@ define dso_local i32 @not_dotp_vscale1(ptr %a, ptr %b, i32 %n, i64 %cost) #0 { ; CHECK-INTERLEAVE1-NEXT: br i1 [[CMP]], label [[FOR_BODY_PREHEADER:%.*]], label [[EXIT:%.*]] ; CHECK-INTERLEAVE1: for.body.preheader: ; CHECK-INTERLEAVE1-NEXT: [[TMP0:%.*]] = zext i32 [[N]] to i64 -; CHECK-INTERLEAVE1-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-INTERLEAVE1-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 1 -; CHECK-INTERLEAVE1-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP0]], [[TMP2]] +; CHECK-INTERLEAVE1-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP0]], 16 ; CHECK-INTERLEAVE1-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK-INTERLEAVE1: vector.ph: -; CHECK-INTERLEAVE1-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-INTERLEAVE1-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 2 -; CHECK-INTERLEAVE1-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP0]], [[TMP4]] +; CHECK-INTERLEAVE1-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP0]], 16 ; CHECK-INTERLEAVE1-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP0]], [[N_MOD_VF]] ; CHECK-INTERLEAVE1-NEXT: [[TMP7:%.*]] = trunc i64 [[N_VEC]] to i32 ; CHECK-INTERLEAVE1-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[A]], i64 [[N_VEC]] ; CHECK-INTERLEAVE1-NEXT: [[TMP9:%.*]] = getelementptr i8, ptr [[B]], i64 [[N_VEC]] -; CHECK-INTERLEAVE1-NEXT: [[TMP10:%.*]] = insertelement <vscale x 2 x i64> zeroinitializer, i64 [[COST]], i32 0 +; CHECK-INTERLEAVE1-NEXT: [[TMP4:%.*]] = insertelement <2 x i64> zeroinitializer, i64 [[COST]], i32 0 ; CHECK-INTERLEAVE1-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-INTERLEAVE1: vector.body: ; CHECK-INTERLEAVE1-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 2 x i64> [ [[TMP10]], [[VECTOR_PH]] ], [ [[TMP18:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI:%.*]] = phi <2 x i64> [ [[TMP4]], [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] ; CHECK-INTERLEAVE1-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]] ; CHECK-INTERLEAVE1-NEXT: [[NEXT_GEP1:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX]] -; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x i8>, ptr 
[[NEXT_GEP]], align 1 -; CHECK-INTERLEAVE1-NEXT: [[TMP14:%.*]] = zext <vscale x 2 x i8> [[WIDE_LOAD]] to <vscale x 2 x i64> -; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD2:%.*]] = load <vscale x 2 x i8>, ptr [[NEXT_GEP1]], align 1 -; CHECK-INTERLEAVE1-NEXT: [[TMP16:%.*]] = zext <vscale x 2 x i8> [[WIDE_LOAD2]] to <vscale x 2 x i64> -; CHECK-INTERLEAVE1-NEXT: [[TMP17:%.*]] = mul nuw nsw <vscale x 2 x i64> [[TMP16]], [[TMP14]] -; CHECK-INTERLEAVE1-NEXT: [[TMP18]] = add <vscale x 2 x i64> [[TMP17]], [[VEC_PHI]] -; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP4]] +; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[NEXT_GEP]], align 1 +; CHECK-INTERLEAVE1-NEXT: [[TMP5:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i64> +; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[NEXT_GEP1]], align 1 +; CHECK-INTERLEAVE1-NEXT: [[TMP6:%.*]] = zext <16 x i8> [[WIDE_LOAD2]] to <16 x i64> +; CHECK-INTERLEAVE1-NEXT: [[TMP10:%.*]] = mul nuw nsw <16 x i64> [[TMP6]], [[TMP5]] +; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE]] = call <2 x i64> @llvm.vector.partial.reduce.add.v2i64.v16i64(<2 x i64> [[VEC_PHI]], <16 x i64> [[TMP10]]) +; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 ; CHECK-INTERLEAVE1-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP24:![0-9]+]] +; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]] ; CHECK-INTERLEAVE1: middle.block: -; CHECK-INTERLEAVE1-NEXT: [[TMP20:%.*]] = call i64 @llvm.vector.reduce.add.nxv2i64(<vscale x 2 x i64> [[TMP18]]) +; CHECK-INTERLEAVE1-NEXT: [[TMP11:%.*]] = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> [[PARTIAL_REDUCE]]) ; CHECK-INTERLEAVE1-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP0]], [[N_VEC]] ; CHECK-INTERLEAVE1-NEXT: br i1 [[CMP_N]], label [[EXIT_LOOPEXIT:%.*]], label [[SCALAR_PH]] ; CHECK-INTERLEAVE1: scalar.ph: @@ -2267,50 +2181,32 @@ define dso_local i32 @not_dotp_vscale1(ptr %a, ptr %b, i32 %n, i64 %cost) #0 { ; CHECK-INTERLEAVED-NEXT: br i1 [[CMP]], label [[FOR_BODY_PREHEADER:%.*]], label [[EXIT:%.*]] ; CHECK-INTERLEAVED: for.body.preheader: ; CHECK-INTERLEAVED-NEXT: [[TMP0:%.*]] = zext i32 [[N]] to i64 -; CHECK-INTERLEAVED-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-INTERLEAVED-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 2 -; CHECK-INTERLEAVED-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP0]], [[TMP2]] +; CHECK-INTERLEAVED-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP0]], 16 ; CHECK-INTERLEAVED-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK-INTERLEAVED: vector.ph: -; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4 -; CHECK-INTERLEAVED-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP0]], [[TMP4]] +; CHECK-INTERLEAVED-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP0]], 16 ; CHECK-INTERLEAVED-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP0]], [[N_MOD_VF]] ; CHECK-INTERLEAVED-NEXT: [[TMP7:%.*]] = trunc i64 [[N_VEC]] to i32 ; CHECK-INTERLEAVED-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[A]], i64 [[N_VEC]] ; CHECK-INTERLEAVED-NEXT: [[TMP9:%.*]] = getelementptr i8, ptr [[B]], i64 [[N_VEC]] -; CHECK-INTERLEAVED-NEXT: [[TMP10:%.*]] = insertelement <vscale x 2 x i64> zeroinitializer, i64 [[COST]], i32 0 +; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = insertelement <2 x i64> zeroinitializer, 
i64 [[COST]], i32 0 ; CHECK-INTERLEAVED-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-INTERLEAVED: vector.body: ; CHECK-INTERLEAVED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 2 x i64> [ [[TMP10]], [[VECTOR_PH]] ], [ [[TMP27:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[VEC_PHI1:%.*]] = phi <vscale x 2 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP28:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi <2 x i64> [ [[TMP4]], [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] ; CHECK-INTERLEAVED-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]] ; CHECK-INTERLEAVED-NEXT: [[NEXT_GEP2:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX]] -; CHECK-INTERLEAVED-NEXT: [[TMP14:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-INTERLEAVED-NEXT: [[TMP15:%.*]] = shl nuw i64 [[TMP14]], 1 -; CHECK-INTERLEAVED-NEXT: [[TMP16:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i64 [[TMP15]] -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x i8>, ptr [[NEXT_GEP]], align 1 -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD3:%.*]] = load <vscale x 2 x i8>, ptr [[TMP16]], align 1 -; CHECK-INTERLEAVED-NEXT: [[TMP17:%.*]] = zext <vscale x 2 x i8> [[WIDE_LOAD]] to <vscale x 2 x i64> -; CHECK-INTERLEAVED-NEXT: [[TMP18:%.*]] = zext <vscale x 2 x i8> [[WIDE_LOAD3]] to <vscale x 2 x i64> -; CHECK-INTERLEAVED-NEXT: [[TMP20:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-INTERLEAVED-NEXT: [[TMP21:%.*]] = shl nuw i64 [[TMP20]], 1 -; CHECK-INTERLEAVED-NEXT: [[TMP22:%.*]] = getelementptr i8, ptr [[NEXT_GEP2]], i64 [[TMP21]] -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD4:%.*]] = load <vscale x 2 x i8>, ptr [[NEXT_GEP2]], align 1 -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD5:%.*]] = load <vscale x 2 x i8>, ptr [[TMP22]], align 1 -; CHECK-INTERLEAVED-NEXT: [[TMP23:%.*]] = zext <vscale x 2 x i8> [[WIDE_LOAD4]] to <vscale x 2 x i64> -; CHECK-INTERLEAVED-NEXT: [[TMP24:%.*]] = zext <vscale x 2 x i8> [[WIDE_LOAD5]] to <vscale x 2 x i64> -; CHECK-INTERLEAVED-NEXT: [[TMP25:%.*]] = mul nuw nsw <vscale x 2 x i64> [[TMP23]], [[TMP17]] -; CHECK-INTERLEAVED-NEXT: [[TMP26:%.*]] = mul nuw nsw <vscale x 2 x i64> [[TMP24]], [[TMP18]] -; CHECK-INTERLEAVED-NEXT: [[TMP27]] = add <vscale x 2 x i64> [[TMP25]], [[VEC_PHI]] -; CHECK-INTERLEAVED-NEXT: [[TMP28]] = add <vscale x 2 x i64> [[TMP26]], [[VEC_PHI1]] -; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP4]] +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[NEXT_GEP]], align 1 +; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i64> +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[NEXT_GEP2]], align 1 +; CHECK-INTERLEAVED-NEXT: [[TMP6:%.*]] = zext <16 x i8> [[WIDE_LOAD2]] to <16 x i64> +; CHECK-INTERLEAVED-NEXT: [[TMP10:%.*]] = mul nuw nsw <16 x i64> [[TMP6]], [[TMP5]] +; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE]] = call <2 x i64> @llvm.vector.partial.reduce.add.v2i64.v16i64(<2 x i64> [[VEC_PHI]], <16 x i64> [[TMP10]]) +; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 ; CHECK-INTERLEAVED-NEXT: [[TMP29:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-INTERLEAVED-NEXT: br i1 [[TMP29]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP26:![0-9]+]] +; CHECK-INTERLEAVED-NEXT: br i1 [[TMP29]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP24:![0-9]+]] ; CHECK-INTERLEAVED: middle.block: -; CHECK-INTERLEAVED-NEXT: 
[[BIN_RDX:%.*]] = add <vscale x 2 x i64> [[TMP28]], [[TMP27]] -; CHECK-INTERLEAVED-NEXT: [[TMP30:%.*]] = call i64 @llvm.vector.reduce.add.nxv2i64(<vscale x 2 x i64> [[BIN_RDX]]) +; CHECK-INTERLEAVED-NEXT: [[TMP11:%.*]] = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> [[PARTIAL_REDUCE]]) ; CHECK-INTERLEAVED-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP0]], [[N_VEC]] ; CHECK-INTERLEAVED-NEXT: br i1 [[CMP_N]], label [[EXIT_LOOPEXIT:%.*]], label [[SCALAR_PH]] ; CHECK-INTERLEAVED: scalar.ph: @@ -2349,7 +2245,7 @@ define dso_local i32 @not_dotp_vscale1(ptr %a, ptr %b, i32 %n, i64 %cost) #0 { ; CHECK-MAXBW-NEXT: [[TMP20]] = add <vscale x 8 x i64> [[TMP17]], [[VEC_PHI]] ; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP4]] ; CHECK-MAXBW-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-MAXBW-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP24:![0-9]+]] +; CHECK-MAXBW-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP25:![0-9]+]] ; CHECK-MAXBW: middle.block: ; CHECK-MAXBW-NEXT: [[TMP19:%.*]] = call i64 @llvm.vector.reduce.add.nxv8i64(<vscale x 8 x i64> [[TMP20]]) ; CHECK-MAXBW-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP0]], [[N_VEC]] @@ -2471,7 +2367,7 @@ define dso_local void @not_dotp_high_register_pressure(ptr %a, ptr %b, ptr %sum, ; CHECK-INTERLEAVE1-NEXT: [[TMP36]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP28]]) ; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 ; CHECK-INTERLEAVE1-NEXT: [[TMP37:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP37]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP26:![0-9]+]] +; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP37]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP24:![0-9]+]] ; CHECK-INTERLEAVE1: middle.block: ; CHECK-INTERLEAVE1-NEXT: [[TMP38:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP36]]) ; CHECK-INTERLEAVE1-NEXT: [[TMP39:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP33]]) @@ -2571,7 +2467,7 @@ define dso_local void @not_dotp_high_register_pressure(ptr %a, ptr %b, ptr %sum, ; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE21]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP28]]) ; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 ; CHECK-INTERLEAVED-NEXT: [[TMP29:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-INTERLEAVED-NEXT: br i1 [[TMP29]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP28:![0-9]+]] +; CHECK-INTERLEAVED-NEXT: br i1 [[TMP29]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP26:![0-9]+]] ; CHECK-INTERLEAVED: middle.block: ; CHECK-INTERLEAVED-NEXT: [[TMP30:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE21]]) ; CHECK-INTERLEAVED-NEXT: [[TMP31:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE20]]) @@ -2671,7 +2567,7 @@ define dso_local void @not_dotp_high_register_pressure(ptr %a, ptr %b, ptr %sum, ; CHECK-MAXBW-NEXT: [[PARTIAL_REDUCE21]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP28]]) ; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 ; CHECK-MAXBW-NEXT: [[TMP29:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-MAXBW-NEXT: br i1 [[TMP29]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop 
[[LOOP26:![0-9]+]] +; CHECK-MAXBW-NEXT: br i1 [[TMP29]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP27:![0-9]+]] ; CHECK-MAXBW: middle.block: ; CHECK-MAXBW-NEXT: [[TMP30:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE21]]) ; CHECK-MAXBW-NEXT: [[TMP31:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE20]]) diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-sub.ll b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-sub.ll index 11ff688..7bb4715 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-sub.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-sub.ll @@ -12,77 +12,65 @@ define i32 @dotp(ptr %a, ptr %b) #0 { ; CHECK-INTERLEAVE1-NEXT: entry: ; CHECK-INTERLEAVE1-NEXT: br label [[VECTOR_PH:%.*]] ; CHECK-INTERLEAVE1: vector.ph: -; CHECK-INTERLEAVE1-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-INTERLEAVE1-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4 -; CHECK-INTERLEAVE1-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]] -; CHECK-INTERLEAVE1-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]] ; CHECK-INTERLEAVE1-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-INTERLEAVE1: vector.body: ; CHECK-INTERLEAVE1-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP15:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] ; CHECK-INTERLEAVE1-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]] -; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i8>, ptr [[TMP7]], align 1 -; CHECK-INTERLEAVE1-NEXT: [[TMP9:%.*]] = zext <vscale x 4 x i8> [[WIDE_LOAD]] to <vscale x 4 x i32> +; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP7]], align 1 +; CHECK-INTERLEAVE1-NEXT: [[TMP1:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i32> ; CHECK-INTERLEAVE1-NEXT: [[TMP10:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX]] -; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 4 x i8>, ptr [[TMP10]], align 1 -; CHECK-INTERLEAVE1-NEXT: [[TMP12:%.*]] = zext <vscale x 4 x i8> [[WIDE_LOAD1]] to <vscale x 4 x i32> -; CHECK-INTERLEAVE1-NEXT: [[TMP13:%.*]] = mul <vscale x 4 x i32> [[TMP12]], [[TMP9]] -; CHECK-INTERLEAVE1-NEXT: [[TMP14:%.*]] = sub <vscale x 4 x i32> zeroinitializer, [[TMP13]] -; CHECK-INTERLEAVE1-NEXT: [[TMP15]] = add <vscale x 4 x i32> [[VEC_PHI]], [[TMP14]] -; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] -; CHECK-INTERLEAVE1-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD1:%.*]] = load <16 x i8>, ptr [[TMP10]], align 1 +; CHECK-INTERLEAVE1-NEXT: [[TMP3:%.*]] = zext <16 x i8> [[WIDE_LOAD1]] to <16 x i32> +; CHECK-INTERLEAVE1-NEXT: [[TMP4:%.*]] = mul <16 x i32> [[TMP3]], [[TMP1]] +; CHECK-INTERLEAVE1-NEXT: [[TMP5:%.*]] = sub <16 x i32> zeroinitializer, [[TMP4]] +; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP5]]) +; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 +; CHECK-INTERLEAVE1-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 +; 
CHECK-INTERLEAVE1-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK-INTERLEAVE1: middle.block: -; CHECK-INTERLEAVE1-NEXT: [[TMP17:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP15]]) -; CHECK-INTERLEAVE1-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]] -; CHECK-INTERLEAVE1-NEXT: br i1 [[CMP_N]], label [[FOR_EXIT:%.*]], label [[SCALAR_PH:%.*]] -; CHECK-INTERLEAVE1: scalar.ph: +; CHECK-INTERLEAVE1-NEXT: [[TMP8:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE]]) +; CHECK-INTERLEAVE1-NEXT: br label [[FOR_EXIT:%.*]] +; CHECK-INTERLEAVE1: for.exit: +; CHECK-INTERLEAVE1-NEXT: ret i32 [[TMP8]] ; ; CHECK-INTERLEAVED-LABEL: define i32 @dotp( ; CHECK-INTERLEAVED-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0:[0-9]+]] { ; CHECK-INTERLEAVED-NEXT: entry: ; CHECK-INTERLEAVED-NEXT: br label [[VECTOR_PH:%.*]] ; CHECK-INTERLEAVED: vector.ph: -; CHECK-INTERLEAVED-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8 -; CHECK-INTERLEAVED-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]] -; CHECK-INTERLEAVED-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]] ; CHECK-INTERLEAVED-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-INTERLEAVED: vector.body: ; CHECK-INTERLEAVED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP25:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[VEC_PHI1:%.*]] = phi <vscale x 4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP26:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVED-NEXT: [[VEC_PHI1:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE5:%.*]], [[VECTOR_BODY]] ] ; CHECK-INTERLEAVED-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]] -; CHECK-INTERLEAVED-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-INTERLEAVED-NEXT: [[TMP10:%.*]] = shl nuw i64 [[TMP9]], 2 -; CHECK-INTERLEAVED-NEXT: [[TMP11:%.*]] = getelementptr i8, ptr [[TMP7]], i64 [[TMP10]] -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i8>, ptr [[TMP7]], align 1 -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD2:%.*]] = load <vscale x 4 x i8>, ptr [[TMP11]], align 1 -; CHECK-INTERLEAVED-NEXT: [[TMP12:%.*]] = zext <vscale x 4 x i8> [[WIDE_LOAD]] to <vscale x 4 x i32> -; CHECK-INTERLEAVED-NEXT: [[TMP13:%.*]] = zext <vscale x 4 x i8> [[WIDE_LOAD2]] to <vscale x 4 x i32> +; CHECK-INTERLEAVED-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[TMP7]], i32 16 +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP7]], align 1 +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP1]], align 1 +; CHECK-INTERLEAVED-NEXT: [[TMP2:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i32> +; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = zext <16 x i8> [[WIDE_LOAD2]] to <16 x i32> ; CHECK-INTERLEAVED-NEXT: [[TMP14:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX]] -; CHECK-INTERLEAVED-NEXT: [[TMP16:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-INTERLEAVED-NEXT: [[TMP17:%.*]] = shl nuw i64 [[TMP16]], 2 -; CHECK-INTERLEAVED-NEXT: [[TMP18:%.*]] = getelementptr i8, ptr [[TMP14]], i64 [[TMP17]] -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD3:%.*]] = load <vscale x 4 x i8>, ptr [[TMP14]], align 1 -; CHECK-INTERLEAVED-NEXT: 
[[WIDE_LOAD4:%.*]] = load <vscale x 4 x i8>, ptr [[TMP18]], align 1 -; CHECK-INTERLEAVED-NEXT: [[TMP19:%.*]] = zext <vscale x 4 x i8> [[WIDE_LOAD3]] to <vscale x 4 x i32> -; CHECK-INTERLEAVED-NEXT: [[TMP20:%.*]] = zext <vscale x 4 x i8> [[WIDE_LOAD4]] to <vscale x 4 x i32> -; CHECK-INTERLEAVED-NEXT: [[TMP21:%.*]] = mul <vscale x 4 x i32> [[TMP19]], [[TMP12]] -; CHECK-INTERLEAVED-NEXT: [[TMP22:%.*]] = mul <vscale x 4 x i32> [[TMP20]], [[TMP13]] -; CHECK-INTERLEAVED-NEXT: [[TMP23:%.*]] = sub <vscale x 4 x i32> zeroinitializer, [[TMP21]] -; CHECK-INTERLEAVED-NEXT: [[TMP24:%.*]] = sub <vscale x 4 x i32> zeroinitializer, [[TMP22]] -; CHECK-INTERLEAVED-NEXT: [[TMP25]] = add <vscale x 4 x i32> [[VEC_PHI]], [[TMP23]] -; CHECK-INTERLEAVED-NEXT: [[TMP26]] = add <vscale x 4 x i32> [[VEC_PHI1]], [[TMP24]] -; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] -; CHECK-INTERLEAVED-NEXT: [[TMP27:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-INTERLEAVED-NEXT: br i1 [[TMP27]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[TMP14]], i32 16 +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD3:%.*]] = load <16 x i8>, ptr [[TMP14]], align 1 +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD4:%.*]] = load <16 x i8>, ptr [[TMP5]], align 1 +; CHECK-INTERLEAVED-NEXT: [[TMP6:%.*]] = zext <16 x i8> [[WIDE_LOAD3]] to <16 x i32> +; CHECK-INTERLEAVED-NEXT: [[TMP15:%.*]] = zext <16 x i8> [[WIDE_LOAD4]] to <16 x i32> +; CHECK-INTERLEAVED-NEXT: [[TMP8:%.*]] = mul <16 x i32> [[TMP6]], [[TMP2]] +; CHECK-INTERLEAVED-NEXT: [[TMP9:%.*]] = mul <16 x i32> [[TMP15]], [[TMP3]] +; CHECK-INTERLEAVED-NEXT: [[TMP10:%.*]] = sub <16 x i32> zeroinitializer, [[TMP8]] +; CHECK-INTERLEAVED-NEXT: [[TMP11:%.*]] = sub <16 x i32> zeroinitializer, [[TMP9]] +; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP10]]) +; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE5]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI1]], <16 x i32> [[TMP11]]) +; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32 +; CHECK-INTERLEAVED-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 +; CHECK-INTERLEAVED-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK-INTERLEAVED: middle.block: -; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <vscale x 4 x i32> [[TMP26]], [[TMP25]] -; CHECK-INTERLEAVED-NEXT: [[TMP28:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[BIN_RDX]]) -; CHECK-INTERLEAVED-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]] -; CHECK-INTERLEAVED-NEXT: br i1 [[CMP_N]], label [[FOR_EXIT:%.*]], label [[SCALAR_PH:%.*]] -; CHECK-INTERLEAVED: scalar.ph: +; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <4 x i32> [[PARTIAL_REDUCE5]], [[PARTIAL_REDUCE]] +; CHECK-INTERLEAVED-NEXT: [[TMP13:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[BIN_RDX]]) +; CHECK-INTERLEAVED-NEXT: br label [[FOR_EXIT:%.*]] +; CHECK-INTERLEAVED: for.exit: +; CHECK-INTERLEAVED-NEXT: ret i32 [[TMP13]] ; ; CHECK-MAXBW-LABEL: define i32 @dotp( ; CHECK-MAXBW-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0:[0-9]+]] { diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce.ll b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce.ll index db3166c..3c2ae1c7 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce.ll +++ 
b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce.ll @@ -17,16 +17,16 @@ define i32 @zext_add_reduc_i8_i32_sve(ptr %a) #0 { ; CHECK-INTERLEAVE1-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-INTERLEAVE1: vector.body: ; CHECK-INTERLEAVE1-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI:%.*]] = phi <16 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP3:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] ; CHECK-INTERLEAVE1-NEXT: [[TMP0:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]] ; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP0]], align 1 ; CHECK-INTERLEAVE1-NEXT: [[TMP2:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i32> -; CHECK-INTERLEAVE1-NEXT: [[TMP3]] = add <16 x i32> [[TMP2]], [[VEC_PHI]] +; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP2]]) ; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 ; CHECK-INTERLEAVE1-NEXT: [[TMP4:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 ; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP4]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK-INTERLEAVE1: middle.block: -; CHECK-INTERLEAVE1-NEXT: [[TMP5:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[TMP3]]) +; CHECK-INTERLEAVE1-NEXT: [[TMP3:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE]]) ; CHECK-INTERLEAVE1-NEXT: br label [[SCALAR_PH:%.*]] ; CHECK-INTERLEAVE1: scalar.ph: ; @@ -38,22 +38,22 @@ define i32 @zext_add_reduc_i8_i32_sve(ptr %a) #0 { ; CHECK-INTERLEAVED-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-INTERLEAVED: vector.body: ; CHECK-INTERLEAVED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi <16 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP5:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[VEC_PHI1:%.*]] = phi <16 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP6:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVED-NEXT: [[VEC_PHI1:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE3:%.*]], [[VECTOR_BODY]] ] ; CHECK-INTERLEAVED-NEXT: [[TMP0:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]] ; CHECK-INTERLEAVED-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[TMP0]], i32 16 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP0]], align 1 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP2]], align 1 ; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i32> ; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = zext <16 x i8> [[WIDE_LOAD2]] to <16 x i32> -; CHECK-INTERLEAVED-NEXT: [[TMP5]] = add <16 x i32> [[TMP3]], [[VEC_PHI]] -; CHECK-INTERLEAVED-NEXT: [[TMP6]] = add <16 x i32> [[TMP4]], [[VEC_PHI1]] +; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP3]]) +; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE3]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI1]], <16 x i32> [[TMP4]]) ; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32 ; CHECK-INTERLEAVED-NEXT: 
[[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 ; CHECK-INTERLEAVED-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK-INTERLEAVED: middle.block: -; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <16 x i32> [[TMP6]], [[TMP5]] -; CHECK-INTERLEAVED-NEXT: [[TMP8:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[BIN_RDX]]) +; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <4 x i32> [[PARTIAL_REDUCE3]], [[PARTIAL_REDUCE]] +; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[BIN_RDX]]) ; CHECK-INTERLEAVED-NEXT: br label [[SCALAR_PH:%.*]] ; CHECK-INTERLEAVED: scalar.ph: ; @@ -199,16 +199,16 @@ define i64 @zext_add_reduc_i8_i64(ptr %a) #0 { ; CHECK-INTERLEAVE1-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-INTERLEAVE1: vector.body: ; CHECK-INTERLEAVE1-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI:%.*]] = phi <16 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP4:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI:%.*]] = phi <2 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] ; CHECK-INTERLEAVE1-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]] ; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP1]], align 1 ; CHECK-INTERLEAVE1-NEXT: [[TMP3:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i64> -; CHECK-INTERLEAVE1-NEXT: [[TMP4]] = add <16 x i64> [[TMP3]], [[VEC_PHI]] +; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE]] = call <2 x i64> @llvm.vector.partial.reduce.add.v2i64.v16i64(<2 x i64> [[VEC_PHI]], <16 x i64> [[TMP3]]) ; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 ; CHECK-INTERLEAVE1-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 ; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; CHECK-INTERLEAVE1: middle.block: -; CHECK-INTERLEAVE1-NEXT: [[TMP6:%.*]] = call i64 @llvm.vector.reduce.add.v16i64(<16 x i64> [[TMP4]]) +; CHECK-INTERLEAVE1-NEXT: [[TMP4:%.*]] = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> [[PARTIAL_REDUCE]]) ; CHECK-INTERLEAVE1-NEXT: br label [[SCALAR_PH:%.*]] ; CHECK-INTERLEAVE1: scalar.ph: ; @@ -220,22 +220,22 @@ define i64 @zext_add_reduc_i8_i64(ptr %a) #0 { ; CHECK-INTERLEAVED-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-INTERLEAVED: vector.body: ; CHECK-INTERLEAVED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi <16 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP6:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[VEC_PHI1:%.*]] = phi <16 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP7:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi <2 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVED-NEXT: [[VEC_PHI1:%.*]] = phi <2 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE3:%.*]], [[VECTOR_BODY]] ] ; CHECK-INTERLEAVED-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]] ; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[TMP1]], i32 16 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP1]], align 1 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP3]], align 1 ; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i64> ; CHECK-INTERLEAVED-NEXT: 
[[TMP5:%.*]] = zext <16 x i8> [[WIDE_LOAD2]] to <16 x i64> -; CHECK-INTERLEAVED-NEXT: [[TMP6]] = add <16 x i64> [[TMP4]], [[VEC_PHI]] -; CHECK-INTERLEAVED-NEXT: [[TMP7]] = add <16 x i64> [[TMP5]], [[VEC_PHI1]] +; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE]] = call <2 x i64> @llvm.vector.partial.reduce.add.v2i64.v16i64(<2 x i64> [[VEC_PHI]], <16 x i64> [[TMP4]]) +; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE3]] = call <2 x i64> @llvm.vector.partial.reduce.add.v2i64.v16i64(<2 x i64> [[VEC_PHI1]], <16 x i64> [[TMP5]]) ; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32 ; CHECK-INTERLEAVED-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 ; CHECK-INTERLEAVED-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; CHECK-INTERLEAVED: middle.block: -; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <16 x i64> [[TMP7]], [[TMP6]] -; CHECK-INTERLEAVED-NEXT: [[TMP9:%.*]] = call i64 @llvm.vector.reduce.add.v16i64(<16 x i64> [[BIN_RDX]]) +; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <2 x i64> [[PARTIAL_REDUCE3]], [[PARTIAL_REDUCE]] +; CHECK-INTERLEAVED-NEXT: [[TMP6:%.*]] = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> [[BIN_RDX]]) ; CHECK-INTERLEAVED-NEXT: br label [[SCALAR_PH:%.*]] ; CHECK-INTERLEAVED: scalar.ph: ; @@ -293,16 +293,16 @@ define i64 @zext_add_reduc_i16_i64(ptr %a) #0 { ; CHECK-INTERLEAVE1-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-INTERLEAVE1: vector.body: ; CHECK-INTERLEAVE1-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI:%.*]] = phi <8 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP4:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI:%.*]] = phi <2 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] ; CHECK-INTERLEAVE1-NEXT: [[TMP1:%.*]] = getelementptr i16, ptr [[A]], i64 [[INDEX]] ; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i16>, ptr [[TMP1]], align 2 ; CHECK-INTERLEAVE1-NEXT: [[TMP3:%.*]] = zext <8 x i16> [[WIDE_LOAD]] to <8 x i64> -; CHECK-INTERLEAVE1-NEXT: [[TMP4]] = add <8 x i64> [[TMP3]], [[VEC_PHI]] +; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE]] = call <2 x i64> @llvm.vector.partial.reduce.add.v2i64.v8i64(<2 x i64> [[VEC_PHI]], <8 x i64> [[TMP3]]) ; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8 ; CHECK-INTERLEAVE1-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 ; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; CHECK-INTERLEAVE1: middle.block: -; CHECK-INTERLEAVE1-NEXT: [[TMP6:%.*]] = call i64 @llvm.vector.reduce.add.v8i64(<8 x i64> [[TMP4]]) +; CHECK-INTERLEAVE1-NEXT: [[TMP4:%.*]] = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> [[PARTIAL_REDUCE]]) ; CHECK-INTERLEAVE1-NEXT: br label [[SCALAR_PH:%.*]] ; CHECK-INTERLEAVE1: scalar.ph: ; @@ -314,22 +314,22 @@ define i64 @zext_add_reduc_i16_i64(ptr %a) #0 { ; CHECK-INTERLEAVED-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-INTERLEAVED: vector.body: ; CHECK-INTERLEAVED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi <8 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP6:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[VEC_PHI1:%.*]] = phi <8 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP7:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi <2 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], 
[[VECTOR_BODY]] ] +; CHECK-INTERLEAVED-NEXT: [[VEC_PHI1:%.*]] = phi <2 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE3:%.*]], [[VECTOR_BODY]] ] ; CHECK-INTERLEAVED-NEXT: [[TMP1:%.*]] = getelementptr i16, ptr [[A]], i64 [[INDEX]] ; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = getelementptr i16, ptr [[TMP1]], i32 8 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i16>, ptr [[TMP1]], align 2 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD2:%.*]] = load <8 x i16>, ptr [[TMP3]], align 2 ; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = zext <8 x i16> [[WIDE_LOAD]] to <8 x i64> ; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = zext <8 x i16> [[WIDE_LOAD2]] to <8 x i64> -; CHECK-INTERLEAVED-NEXT: [[TMP6]] = add <8 x i64> [[TMP4]], [[VEC_PHI]] -; CHECK-INTERLEAVED-NEXT: [[TMP7]] = add <8 x i64> [[TMP5]], [[VEC_PHI1]] +; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE]] = call <2 x i64> @llvm.vector.partial.reduce.add.v2i64.v8i64(<2 x i64> [[VEC_PHI]], <8 x i64> [[TMP4]]) +; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE3]] = call <2 x i64> @llvm.vector.partial.reduce.add.v2i64.v8i64(<2 x i64> [[VEC_PHI1]], <8 x i64> [[TMP5]]) ; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 ; CHECK-INTERLEAVED-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 ; CHECK-INTERLEAVED-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; CHECK-INTERLEAVED: middle.block: -; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <8 x i64> [[TMP7]], [[TMP6]] -; CHECK-INTERLEAVED-NEXT: [[TMP9:%.*]] = call i64 @llvm.vector.reduce.add.v8i64(<8 x i64> [[BIN_RDX]]) +; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <2 x i64> [[PARTIAL_REDUCE3]], [[PARTIAL_REDUCE]] +; CHECK-INTERLEAVED-NEXT: [[TMP6:%.*]] = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> [[BIN_RDX]]) ; CHECK-INTERLEAVED-NEXT: br label [[SCALAR_PH:%.*]] ; CHECK-INTERLEAVED: scalar.ph: ; @@ -764,16 +764,16 @@ define i32 @sext_add_reduc_i8_i32(ptr %a) #0 { ; CHECK-INTERLEAVE1-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-INTERLEAVE1: vector.body: ; CHECK-INTERLEAVE1-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI:%.*]] = phi <16 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP4:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] ; CHECK-INTERLEAVE1-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]] ; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP1]], align 1 ; CHECK-INTERLEAVE1-NEXT: [[TMP3:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32> -; CHECK-INTERLEAVE1-NEXT: [[TMP4]] = add <16 x i32> [[TMP3]], [[VEC_PHI]] +; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP3]]) ; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 ; CHECK-INTERLEAVE1-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 ; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]] ; CHECK-INTERLEAVE1: middle.block: -; CHECK-INTERLEAVE1-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[TMP4]]) +; CHECK-INTERLEAVE1-NEXT: [[TMP4:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE]]) ; CHECK-INTERLEAVE1-NEXT: br label [[SCALAR_PH:%.*]] ; CHECK-INTERLEAVE1: scalar.ph: ; @@ -785,22 +785,22 @@ define i32 
@sext_add_reduc_i8_i32(ptr %a) #0 { ; CHECK-INTERLEAVED-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-INTERLEAVED: vector.body: ; CHECK-INTERLEAVED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi <16 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP6:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[VEC_PHI1:%.*]] = phi <16 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP7:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVED-NEXT: [[VEC_PHI1:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE3:%.*]], [[VECTOR_BODY]] ] ; CHECK-INTERLEAVED-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]] ; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[TMP1]], i32 16 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP1]], align 1 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP3]], align 1 ; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32> ; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = sext <16 x i8> [[WIDE_LOAD2]] to <16 x i32> -; CHECK-INTERLEAVED-NEXT: [[TMP6]] = add <16 x i32> [[TMP4]], [[VEC_PHI]] -; CHECK-INTERLEAVED-NEXT: [[TMP7]] = add <16 x i32> [[TMP5]], [[VEC_PHI1]] +; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP4]]) +; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE3]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI1]], <16 x i32> [[TMP5]]) ; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32 ; CHECK-INTERLEAVED-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 ; CHECK-INTERLEAVED-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]] ; CHECK-INTERLEAVED: middle.block: -; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <16 x i32> [[TMP7]], [[TMP6]] -; CHECK-INTERLEAVED-NEXT: [[TMP9:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[BIN_RDX]]) +; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <4 x i32> [[PARTIAL_REDUCE3]], [[PARTIAL_REDUCE]] +; CHECK-INTERLEAVED-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[BIN_RDX]]) ; CHECK-INTERLEAVED-NEXT: br label [[SCALAR_PH:%.*]] ; CHECK-INTERLEAVED: scalar.ph: ; @@ -984,21 +984,21 @@ define i32 @add_of_loop_invariant_zext(i32 %a, ptr %b, i8 %c, i32 %d) #0 { ; CHECK-INTERLEAVE1-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <16 x i8> poison, i8 [[C]], i64 0 ; CHECK-INTERLEAVE1-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <16 x i8> [[BROADCAST_SPLATINSERT]], <16 x i8> poison, <16 x i32> zeroinitializer ; CHECK-INTERLEAVE1-NEXT: [[TMP1:%.*]] = add i32 [[D]], [[N_VEC]] -; CHECK-INTERLEAVE1-NEXT: [[TMP2:%.*]] = insertelement <16 x i32> zeroinitializer, i32 [[A]], i32 0 +; CHECK-INTERLEAVE1-NEXT: [[TMP2:%.*]] = insertelement <4 x i32> zeroinitializer, i32 [[A]], i32 0 ; CHECK-INTERLEAVE1-NEXT: [[TMP3:%.*]] = zext <16 x i8> [[BROADCAST_SPLAT]] to <16 x i32> ; CHECK-INTERLEAVE1-NEXT: br label [[FOR_BODY:%.*]] ; CHECK-INTERLEAVE1: vector.body: ; CHECK-INTERLEAVE1-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[FOR_BODY]] ] -; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI:%.*]] = phi <16 x i32> [ [[TMP2]], [[VECTOR_PH]] ], [ [[TMP6:%.*]], [[FOR_BODY]] ] +; 
CHECK-INTERLEAVE1-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ [[TMP2]], [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[FOR_BODY]] ] ; CHECK-INTERLEAVE1-NEXT: [[OFFSET_IDX:%.*]] = add i32 [[D]], [[INDEX]] ; CHECK-INTERLEAVE1-NEXT: [[TMP4:%.*]] = getelementptr inbounds i8, ptr [[B]], i32 [[OFFSET_IDX]] ; CHECK-INTERLEAVE1-NEXT: store <16 x i8> zeroinitializer, ptr [[TMP4]], align 1 -; CHECK-INTERLEAVE1-NEXT: [[TMP6]] = add <16 x i32> [[VEC_PHI]], [[TMP3]] +; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP3]]) ; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 16 ; CHECK-INTERLEAVE1-NEXT: [[TMP7:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]] ; CHECK-INTERLEAVE1: middle.block: -; CHECK-INTERLEAVE1-NEXT: [[TMP8:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[TMP6]]) +; CHECK-INTERLEAVE1-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE]]) ; CHECK-INTERLEAVE1-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[TMP0]], [[N_VEC]] ; CHECK-INTERLEAVE1-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]] ; CHECK-INTERLEAVE1: scalar.ph: @@ -1015,26 +1015,26 @@ define i32 @add_of_loop_invariant_zext(i32 %a, ptr %b, i8 %c, i32 %d) #0 { ; CHECK-INTERLEAVED-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <16 x i8> poison, i8 [[C]], i64 0 ; CHECK-INTERLEAVED-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <16 x i8> [[BROADCAST_SPLATINSERT]], <16 x i8> poison, <16 x i32> zeroinitializer ; CHECK-INTERLEAVED-NEXT: [[TMP1:%.*]] = add i32 [[D]], [[N_VEC]] -; CHECK-INTERLEAVED-NEXT: [[TMP12:%.*]] = insertelement <16 x i32> zeroinitializer, i32 [[A]], i32 0 +; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = insertelement <4 x i32> zeroinitializer, i32 [[A]], i32 0 ; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = zext <16 x i8> [[BROADCAST_SPLAT]] to <16 x i32> ; CHECK-INTERLEAVED-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-INTERLEAVED: vector.body: ; CHECK-INTERLEAVED-NEXT: [[VEC_PHI1:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[TMP22:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi <16 x i32> [ [[TMP12]], [[VECTOR_PH]] ], [ [[TMP7:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[VEC_PHI2:%.*]] = phi <16 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP8:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ [[TMP5]], [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVED-NEXT: [[VEC_PHI2:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE2:%.*]], [[VECTOR_BODY]] ] ; CHECK-INTERLEAVED-NEXT: [[OFFSET_IDX:%.*]] = add i32 [[D]], [[VEC_PHI1]] ; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = getelementptr inbounds i8, ptr [[B]], i32 [[OFFSET_IDX]] ; CHECK-INTERLEAVED-NEXT: [[TMP6:%.*]] = getelementptr inbounds i8, ptr [[TMP4]], i32 16 ; CHECK-INTERLEAVED-NEXT: store <16 x i8> zeroinitializer, ptr [[TMP4]], align 1 ; CHECK-INTERLEAVED-NEXT: store <16 x i8> zeroinitializer, ptr [[TMP6]], align 1 -; CHECK-INTERLEAVED-NEXT: [[TMP7]] = add <16 x i32> [[VEC_PHI]], [[TMP3]] -; CHECK-INTERLEAVED-NEXT: [[TMP8]] = add <16 x i32> [[VEC_PHI2]], [[TMP3]] +; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP3]]) +; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE2]] = call <4 x i32> 
@llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI2]], <16 x i32> [[TMP3]]) ; CHECK-INTERLEAVED-NEXT: [[TMP22]] = add nuw i32 [[VEC_PHI1]], 32 ; CHECK-INTERLEAVED-NEXT: [[TMP9:%.*]] = icmp eq i32 [[TMP22]], [[N_VEC]] ; CHECK-INTERLEAVED-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]] ; CHECK-INTERLEAVED: middle.block: -; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <16 x i32> [[TMP8]], [[TMP7]] -; CHECK-INTERLEAVED-NEXT: [[TMP10:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[BIN_RDX]]) +; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <4 x i32> [[PARTIAL_REDUCE2]], [[PARTIAL_REDUCE]] +; CHECK-INTERLEAVED-NEXT: [[TMP7:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[BIN_RDX]]) ; CHECK-INTERLEAVED-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[TMP2]], [[N_VEC]] ; CHECK-INTERLEAVED-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]] ; CHECK-INTERLEAVED: scalar.ph: diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/reg-usage.ll b/llvm/test/Transforms/LoopVectorize/AArch64/reg-usage.ll index c61361b..25ee100 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/reg-usage.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/reg-usage.ll @@ -192,7 +192,7 @@ define i32 @dotp_unrolled(i32 %num_out, i64 %num_in, ptr %a, ptr %b) { ; CHECK: LV(REG): VF = 16 ; CHECK-NEXT: LV(REG): Found max usage: 2 item ; CHECK-NEXT: LV(REG): RegisterClass: Generic::ScalarRC, 9 registers -; CHECK-NEXT: LV(REG): RegisterClass: Generic::VectorRC, 24 registers +; CHECK-NEXT: LV(REG): RegisterClass: Generic::VectorRC, 12 registers ; CHECK-NEXT: LV(REG): Found invariant usage: 1 item entry: br label %for.body diff --git a/llvm/test/Transforms/LoopVectorize/single-early-exit-deref-assumptions.ll b/llvm/test/Transforms/LoopVectorize/single-early-exit-deref-assumptions.ll index 9620697..f794620 100644 --- a/llvm/test/Transforms/LoopVectorize/single-early-exit-deref-assumptions.ll +++ b/llvm/test/Transforms/LoopVectorize/single-early-exit-deref-assumptions.ll @@ -512,8 +512,8 @@ define i64 @early_exit_alignment_and_deref_known_via_assumption_with_constant_si ; CHECK-NEXT: [[INDEX1:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], %[[LOOP_INC:.*]] ], [ 0, %[[ENTRY]] ] ; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 [[INDEX1]] ; CHECK-NEXT: [[LD1:%.*]] = load i8, ptr [[ARRAYIDX2]], align 1 -; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[P2]], i64 [[INDEX1]] -; CHECK-NEXT: [[LD2:%.*]] = load i8, ptr [[ARRAYIDX1]], align 1 +; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[P2]], i64 [[INDEX1]] +; CHECK-NEXT: [[LD2:%.*]] = load i8, ptr [[TMP1]], align 1 ; CHECK-NEXT: [[CMP3:%.*]] = icmp eq i8 [[LD1]], [[LD2]] ; CHECK-NEXT: br i1 [[CMP3]], label %[[LOOP_INC]], label %[[LOOP_END:.*]] ; CHECK: [[LOOP_INC]]: diff --git a/llvm/test/Transforms/SLPVectorizer/X86/no_alternate_divrem.ll b/llvm/test/Transforms/SLPVectorizer/X86/no_alternate_divrem.ll index ed0bd3f..cf62fd5 100644 --- a/llvm/test/Transforms/SLPVectorizer/X86/no_alternate_divrem.ll +++ b/llvm/test/Transforms/SLPVectorizer/X86/no_alternate_divrem.ll @@ -55,6 +55,54 @@ entry: ret void } +define void @test_add_udiv(ptr %arr1, ptr %arr2, i32 %a0, i32 %a1, i32 %a2, i32 %a3) { +; CHECK-LABEL: @test_add_udiv( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[GEP1_2:%.*]] = getelementptr i32, ptr [[ARR1:%.*]], i32 2 +; CHECK-NEXT: [[GEP1_3:%.*]] = getelementptr i32, ptr [[ARR1]], i32 3 +; CHECK-NEXT: [[V2:%.*]] = load i32, ptr [[GEP1_2]], align 4 +; 
CHECK-NEXT: [[V3:%.*]] = load i32, ptr [[GEP1_3]], align 4 +; CHECK-NEXT: [[Y2:%.*]] = add nsw i32 [[A2:%.*]], 42 +; CHECK-NEXT: [[TMP0:%.*]] = load <2 x i32>, ptr [[ARR1]], align 4 +; CHECK-NEXT: [[TMP1:%.*]] = insertelement <4 x i32> <i32 poison, i32 poison, i32 0, i32 poison>, i32 [[A0:%.*]], i32 0 +; CHECK-NEXT: [[TMP2:%.*]] = insertelement <4 x i32> [[TMP1]], i32 [[A1:%.*]], i32 1 +; CHECK-NEXT: [[TMP3:%.*]] = insertelement <4 x i32> [[TMP2]], i32 [[A3:%.*]], i32 3 +; CHECK-NEXT: [[TMP4:%.*]] = add nsw <4 x i32> <i32 1146, i32 146, i32 0, i32 0>, [[TMP3]] +; CHECK-NEXT: [[RES2:%.*]] = udiv i32 [[V2]], [[Y2]] +; CHECK-NEXT: [[TMP5:%.*]] = insertelement <4 x i32> poison, i32 [[RES2]], i32 2 +; CHECK-NEXT: [[TMP6:%.*]] = insertelement <4 x i32> [[TMP5]], i32 [[V3]], i32 3 +; CHECK-NEXT: [[TMP7:%.*]] = shufflevector <2 x i32> [[TMP0]], <2 x i32> poison, <4 x i32> <i32 0, i32 1, i32 poison, i32 poison> +; CHECK-NEXT: [[TMP8:%.*]] = shufflevector <4 x i32> [[TMP6]], <4 x i32> [[TMP7]], <4 x i32> <i32 4, i32 5, i32 2, i32 3> +; CHECK-NEXT: [[TMP9:%.*]] = add nsw <4 x i32> [[TMP8]], [[TMP4]] +; CHECK-NEXT: store <4 x i32> [[TMP9]], ptr [[ARR2:%.*]], align 4 +; CHECK-NEXT: ret void +; +entry: + %gep1.1 = getelementptr i32, ptr %arr1, i32 1 + %gep1.2 = getelementptr i32, ptr %arr1, i32 2 + %gep1.3 = getelementptr i32, ptr %arr1, i32 3 + %gep2.1 = getelementptr i32, ptr %arr2, i32 1 + %gep2.2 = getelementptr i32, ptr %arr2, i32 2 + %gep2.3 = getelementptr i32, ptr %arr2, i32 3 + %v0 = load i32, ptr %arr1 + %v1 = load i32, ptr %gep1.1 + %v2 = load i32, ptr %gep1.2 + %v3 = load i32, ptr %gep1.3 + %y0 = add nsw i32 %a0, 1146 + %y1 = add nsw i32 %a1, 146 + %y2 = add nsw i32 %a2, 42 + %y3 = add nsw i32 %a3, 0 + %res0 = add nsw i32 %v0, %y0 + %res1 = add nsw i32 %v1, %y1 + %res2 = udiv i32 %v2, %y2 + %res3 = add nsw i32 %v3, %y3 + store i32 %res0, ptr %arr2 + store i32 %res1, ptr %gep2.1 + store i32 %res2, ptr %gep2.2 + store i32 %res3, ptr %gep2.3 + ret void +} + ;; Similar test, but now div/rem is main opcode and not the alternate one. Same issue. 
define void @test_urem_add(ptr %arr1, ptr %arr2, i32 %a0, i32 %a1, i32 %a2, i32 %a3) { ; CHECK-LABEL: @test_urem_add( @@ -114,3 +162,56 @@ entry: store i32 %res3, ptr %gep2.3 ret void } + +define void @test_srem_add(ptr %arr1, ptr %arr2, i32 %a0, i32 %a1, i32 %a2, i32 %a3) { +; CHECK-LABEL: @test_srem_add( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[GEP1_1:%.*]] = getelementptr i32, ptr [[ARR1:%.*]], i32 1 +; CHECK-NEXT: [[GEP1_2:%.*]] = getelementptr i32, ptr [[ARR1]], i32 2 +; CHECK-NEXT: [[GEP1_3:%.*]] = getelementptr i32, ptr [[ARR1]], i32 3 +; CHECK-NEXT: [[GEP2_1:%.*]] = getelementptr i32, ptr [[ARR2:%.*]], i32 1 +; CHECK-NEXT: [[GEP2_2:%.*]] = getelementptr i32, ptr [[ARR2]], i32 2 +; CHECK-NEXT: [[GEP2_3:%.*]] = getelementptr i32, ptr [[ARR2]], i32 3 +; CHECK-NEXT: [[V0:%.*]] = load i32, ptr [[ARR1]], align 4 +; CHECK-NEXT: [[V1:%.*]] = load i32, ptr [[GEP1_1]], align 4 +; CHECK-NEXT: [[V2:%.*]] = load i32, ptr [[GEP1_2]], align 4 +; CHECK-NEXT: [[V3:%.*]] = load i32, ptr [[GEP1_3]], align 4 +; CHECK-NEXT: [[Y0:%.*]] = add nsw i32 [[A0:%.*]], 1146 +; CHECK-NEXT: [[Y1:%.*]] = add nsw i32 [[A1:%.*]], 146 +; CHECK-NEXT: [[Y2:%.*]] = add nsw i32 [[A2:%.*]], 42 +; CHECK-NEXT: [[Y3:%.*]] = add nsw i32 [[A3:%.*]], 0 +; CHECK-NEXT: [[RES0:%.*]] = srem i32 [[V0]], [[Y0]] +; CHECK-NEXT: [[RES1:%.*]] = srem i32 [[V1]], [[Y1]] +; CHECK-NEXT: [[RES2:%.*]] = srem i32 [[V2]], [[Y2]] +; CHECK-NEXT: [[RES3:%.*]] = add nsw i32 [[V3]], [[Y3]] +; CHECK-NEXT: store i32 [[RES0]], ptr [[ARR2]], align 4 +; CHECK-NEXT: store i32 [[RES1]], ptr [[GEP2_1]], align 4 +; CHECK-NEXT: store i32 [[RES2]], ptr [[GEP2_2]], align 4 +; CHECK-NEXT: store i32 [[RES3]], ptr [[GEP2_3]], align 4 +; CHECK-NEXT: ret void +; +entry: + %gep1.1 = getelementptr i32, ptr %arr1, i32 1 + %gep1.2 = getelementptr i32, ptr %arr1, i32 2 + %gep1.3 = getelementptr i32, ptr %arr1, i32 3 + %gep2.1 = getelementptr i32, ptr %arr2, i32 1 + %gep2.2 = getelementptr i32, ptr %arr2, i32 2 + %gep2.3 = getelementptr i32, ptr %arr2, i32 3 + %v0 = load i32, ptr %arr1 + %v1 = load i32, ptr %gep1.1 + %v2 = load i32, ptr %gep1.2 + %v3 = load i32, ptr %gep1.3 + %y0 = add nsw i32 %a0, 1146 + %y1 = add nsw i32 %a1, 146 + %y2 = add nsw i32 %a2, 42 + %y3 = add nsw i32 %a3, 0 + %res0 = srem i32 %v0, %y0 + %res1 = srem i32 %v1, %y1 + %res2 = srem i32 %v2, %y2 + %res3 = add nsw i32 %v3, %y3 + store i32 %res0, ptr %arr2 + store i32 %res1, ptr %gep2.1 + store i32 %res2, ptr %gep2.2 + store i32 %res3, ptr %gep2.3 + ret void +} diff --git a/llvm/test/tools/llvm-objdump/ELF/AMDGPU/kd-gfx1250.s b/llvm/test/tools/llvm-objdump/ELF/AMDGPU/kd-gfx1250.s index 3e96ea3..13f20bf 100644 --- a/llvm/test/tools/llvm-objdump/ELF/AMDGPU/kd-gfx1250.s +++ b/llvm/test/tools/llvm-objdump/ELF/AMDGPU/kd-gfx1250.s @@ -20,7 +20,7 @@ ; CHECK-NEXT: ; IMAGE_OP 0 ; CHECK-NEXT: .amdhsa_next_free_vgpr 32 ; CHECK-NEXT: .amdhsa_reserve_vcc 0 -; CHECK-NEXT: .amdhsa_reserve_xnack_mask 0 +; CHECK-NEXT: .amdhsa_reserve_xnack_mask 1 ; CHECK-NEXT: .amdhsa_next_free_sgpr 8 ; CHECK-NEXT: .amdhsa_float_round_mode_32 0 ; CHECK-NEXT: .amdhsa_float_round_mode_16_64 0 @@ -76,7 +76,7 @@ ; CHECK-NEXT: ; IMAGE_OP 0 ; CHECK-NEXT: .amdhsa_next_free_vgpr 32 ; CHECK-NEXT: .amdhsa_reserve_vcc 0 -; CHECK-NEXT: .amdhsa_reserve_xnack_mask 0 +; CHECK-NEXT: .amdhsa_reserve_xnack_mask 1 ; CHECK-NEXT: .amdhsa_next_free_sgpr 8 ; CHECK-NEXT: .amdhsa_float_round_mode_32 0 ; CHECK-NEXT: .amdhsa_float_round_mode_16_64 0 diff --git a/llvm/unittests/Analysis/FunctionPropertiesAnalysisTest.cpp 
b/llvm/unittests/Analysis/FunctionPropertiesAnalysisTest.cpp index b6e8567..497da8f 100644 --- a/llvm/unittests/Analysis/FunctionPropertiesAnalysisTest.cpp +++ b/llvm/unittests/Analysis/FunctionPropertiesAnalysisTest.cpp @@ -46,8 +46,8 @@ public: MAM.registerPass([VocabVector = std::move(VocabVector)]() mutable { return IR2VecVocabAnalysis(std::move(VocabVector)); }); - IR2VecVocab = - new ir2vec::Vocabulary(ir2vec::Vocabulary::createDummyVocabForTest(1)); + IR2VecVocab = std::make_unique<ir2vec::Vocabulary>( + ir2vec::Vocabulary::createDummyVocabForTest(1)); MAM.registerPass([&] { return PassInstrumentationAnalysis(); }); FAM.registerPass([&] { return ModuleAnalysisManagerFunctionProxy(MAM); }); FAM.registerPass([&] { return DominatorTreeAnalysis(); }); @@ -69,7 +69,7 @@ protected: std::unique_ptr<LoopInfo> LI; FunctionAnalysisManager FAM; ModuleAnalysisManager MAM; - ir2vec::Vocabulary *IR2VecVocab; + std::unique_ptr<ir2vec::Vocabulary> IR2VecVocab; void TearDown() override { // Restore original IR2Vec weights diff --git a/llvm/unittests/Analysis/IR2VecTest.cpp b/llvm/unittests/Analysis/IR2VecTest.cpp index 743628f..d136cb6 100644 --- a/llvm/unittests/Analysis/IR2VecTest.cpp +++ b/llvm/unittests/Analysis/IR2VecTest.cpp @@ -295,7 +295,7 @@ TEST(IR2VecTest, ZeroDimensionEmbedding) { // Fixture for IR2Vec tests requiring IR setup. class IR2VecTestFixture : public ::testing::Test { protected: - Vocabulary *V; + std::unique_ptr<Vocabulary> V; LLVMContext Ctx; std::unique_ptr<Module> M; Function *F = nullptr; @@ -304,7 +304,7 @@ protected: Instruction *RetInst = nullptr; void SetUp() override { - V = new Vocabulary(Vocabulary::createDummyVocabForTest(2)); + V = std::make_unique<Vocabulary>(Vocabulary::createDummyVocabForTest(2)); // Setup IR M = std::make_unique<Module>("TestM", Ctx); diff --git a/llvm/unittests/Object/BuildIDTest.cpp b/llvm/unittests/Object/BuildIDTest.cpp new file mode 100644 index 0000000..04ca636 --- /dev/null +++ b/llvm/unittests/Object/BuildIDTest.cpp @@ -0,0 +1,120 @@ +//===- BuildIDTest.cpp - Tests for getBuildID ----------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "llvm/Object/BuildID.h" +#include "llvm/ADT/SmallString.h" +#include "llvm/ADT/StringRef.h" +#include "llvm/Object/ELFObjectFile.h" +#include "llvm/ObjectYAML/yaml2obj.h" +#include "llvm/Support/YAMLTraits.h" +#include "llvm/Testing/Support/Error.h" + +#include "gtest/gtest.h" + +using namespace llvm; +using namespace llvm::object; + +template <class ELFT> +static Expected<ELFObjectFile<ELFT>> toBinary(SmallVectorImpl<char> &Storage, + StringRef Yaml) { + raw_svector_ostream OS(Storage); + yaml::Input YIn(Yaml); + if (!yaml::convertYAML(YIn, OS, [](const Twine &Msg) {})) + return createStringError(std::errc::invalid_argument, + "unable to convert YAML"); + return ELFObjectFile<ELFT>::create(MemoryBufferRef(OS.str(), "dummyELF")); +} + +static StringRef getInvalidNoteELF(bool WithShdr) { + static std::string WithSection(R"( +--- !ELF +FileHeader: + Class: ELFCLASS64 + Data: ELFDATA2LSB + Type: ET_EXEC + Machine: EM_X86_64 +ProgramHeaders: + - Type: PT_NOTE + FileSize: 0x1a + FirstSec: .note.gnu.build-id + LastSec: .note.gnu.build-id +Sections: + - Name: .note.gnu.build-id + Type: SHT_NOTE + AddressAlign: 0x04 + Notes: + - Name: "GNU" + Desc: "abb50d82b6bdc861" + Type: 3 +)"); + static std::string WithoutSection(WithSection + R"( + - Type: SectionHeaderTable + NoHeaders: true +)"); + if (WithShdr) + return WithSection; + return WithoutSection; +} + +// The BuildID can be looked up from a section header, if there is no program +// header. +TEST(BuildIDTest, InvalidPhdrFileSizeWithShdrs) { + SmallString<0> Storage; + Expected<ELFObjectFile<ELF64LE>> ElfOrErr = + toBinary<ELF64LE>(Storage, getInvalidNoteELF(true)); + ASSERT_THAT_EXPECTED(ElfOrErr, Succeeded()); + BuildIDRef BuildID = getBuildID(&ElfOrErr.get()); + EXPECT_EQ( + StringRef(reinterpret_cast<const char *>(BuildID.data()), BuildID.size()), + "\xAB\xB5\x0D\x82\xB6\xBD\xC8\x61"); +} + +// The code handles a malformed program header that points at data outside the +// file. +TEST(BuildIDTest, InvalidPhdrFileSizeNoShdrs) { + SmallString<0> Storage; + Expected<ELFObjectFile<ELF64LE>> ElfOrErr = + toBinary<ELF64LE>(Storage, getInvalidNoteELF(false)); + ASSERT_THAT_EXPECTED(ElfOrErr, Succeeded()); + BuildIDRef BuildID = getBuildID(&ElfOrErr.get()); + EXPECT_EQ( + StringRef(reinterpret_cast<const char *>(BuildID.data()), BuildID.size()), + ""); +} + +// The code handles a malformed section header that points at data outside the +// file. 
+TEST(BuildIDTest, InvalidSectionHeader) { + SmallString<0> Storage; + Expected<ELFObjectFile<ELF64LE>> ElfOrErr = toBinary<ELF64LE>(Storage, R"( +--- !ELF +FileHeader: + Class: ELFCLASS64 + Data: ELFDATA2LSB + Type: ET_EXEC + Machine: EM_X86_64 +ProgramHeaders: + - Type: PT_NOTE + FirstSec: .note.gnu.build-id + LastSec: .note.gnu.build-id +Sections: + - Name: .note.gnu.build-id + Type: SHT_NOTE + AddressAlign: 0x04 + ShOffset: 0x1a1 + Notes: + - Name: "GNU" + Desc: "abb50d82b6bdc861" + Type: 3 +)"); + ASSERT_THAT_EXPECTED(ElfOrErr, Succeeded()); + BuildIDRef BuildID = getBuildID(&ElfOrErr.get()); + EXPECT_EQ( + StringRef(reinterpret_cast<const char *>(BuildID.data()), BuildID.size()), + "\xAB\xB5\x0D\x82\xB6\xBD\xC8\x61"); +} diff --git a/llvm/unittests/Object/CMakeLists.txt b/llvm/unittests/Object/CMakeLists.txt index 1343352..cd70a7b 100644 --- a/llvm/unittests/Object/CMakeLists.txt +++ b/llvm/unittests/Object/CMakeLists.txt @@ -7,6 +7,7 @@ set(LLVM_LINK_COMPONENTS add_llvm_unittest(ObjectTests ArchiveTest.cpp + BuildIDTest.cpp COFFObjectFileTest.cpp DXContainerTest.cpp ELFObjectFileTest.cpp diff --git a/llvm/unittests/Transforms/Vectorize/VPlanHCFGTest.cpp b/llvm/unittests/Transforms/Vectorize/VPlanHCFGTest.cpp index a943e7ac..b99d656 100644 --- a/llvm/unittests/Transforms/Vectorize/VPlanHCFGTest.cpp +++ b/llvm/unittests/Transforms/Vectorize/VPlanHCFGTest.cpp @@ -203,7 +203,7 @@ TEST_F(VPlanHCFGTest, testVPInstructionToVPRecipesInner) { VPInstruction::BranchOnCond, {Plan->getOrAddLiveIn(ConstantInt::getTrue(F->getContext()))})); VPlanTransforms::tryToConvertVPInstructionsToVPRecipes( - Plan, [](PHINode *P) { return nullptr; }, TLI); + *Plan, [](PHINode *P) { return nullptr; }, TLI); VPBlockBase *Entry = Plan->getEntry()->getEntryBasicBlock(); EXPECT_EQ(0u, Entry->getNumPredecessors()); diff --git a/llvm/unittests/Transforms/Vectorize/VPlanUncountableExitTest.cpp b/llvm/unittests/Transforms/Vectorize/VPlanUncountableExitTest.cpp index eb075e6..b89d378 100644 --- a/llvm/unittests/Transforms/Vectorize/VPlanUncountableExitTest.cpp +++ b/llvm/unittests/Transforms/Vectorize/VPlanUncountableExitTest.cpp @@ -48,7 +48,7 @@ TEST_F(VPUncountableExitTest, FindUncountableExitRecipes) { BasicBlock *LoopHeader = F->getEntryBlock().getSingleSuccessor(); auto Plan = buildVPlan(LoopHeader, /*HasUncountableExit=*/true); VPlanTransforms::tryToConvertVPInstructionsToVPRecipes( - Plan, [](PHINode *P) { return nullptr; }, *TLI); + *Plan, [](PHINode *P) { return nullptr; }, *TLI); VPlanTransforms::runPass(VPlanTransforms::optimize, *Plan); SmallVector<VPRecipeBase *> Recipes; @@ -85,7 +85,7 @@ TEST_F(VPUncountableExitTest, NoUncountableExit) { BasicBlock *LoopHeader = F->getEntryBlock().getSingleSuccessor(); auto Plan = buildVPlan(LoopHeader); VPlanTransforms::tryToConvertVPInstructionsToVPRecipes( - Plan, [](PHINode *P) { return nullptr; }, *TLI); + *Plan, [](PHINode *P) { return nullptr; }, *TLI); VPlanTransforms::runPass(VPlanTransforms::optimize, *Plan); SmallVector<VPRecipeBase *> Recipes; diff --git a/llvm/utils/TableGen/Common/CodeGenDAGPatterns.cpp b/llvm/utils/TableGen/Common/CodeGenDAGPatterns.cpp index 75bea77..8076ce2 100644 --- a/llvm/utils/TableGen/Common/CodeGenDAGPatterns.cpp +++ b/llvm/utils/TableGen/Common/CodeGenDAGPatterns.cpp @@ -246,16 +246,14 @@ bool TypeSetByHwMode::operator==(const TypeSetByHwMode &VTS) const { return true; } -namespace llvm { -raw_ostream &operator<<(raw_ostream &OS, const MachineValueTypeSet &T) { +raw_ostream &llvm::operator<<(raw_ostream &OS, const 
MachineValueTypeSet &T) { T.writeToStream(OS); return OS; } -raw_ostream &operator<<(raw_ostream &OS, const TypeSetByHwMode &T) { +raw_ostream &llvm::operator<<(raw_ostream &OS, const TypeSetByHwMode &T) { T.writeToStream(OS); return OS; } -} // namespace llvm LLVM_DUMP_METHOD void TypeSetByHwMode::dump() const { dbgs() << *this << '\n'; } diff --git a/llvm/utils/TableGen/Common/CodeGenRegisters.cpp b/llvm/utils/TableGen/Common/CodeGenRegisters.cpp index e873b3e..8d0ec9a 100644 --- a/llvm/utils/TableGen/Common/CodeGenRegisters.cpp +++ b/llvm/utils/TableGen/Common/CodeGenRegisters.cpp @@ -744,7 +744,7 @@ CodeGenRegisterClass::CodeGenRegisterClass(CodeGenRegBank &RegBank, RSI.insertRegSizeForMode(DefaultMode, RI); } - CopyCost = R->getValueAsInt("CopyCost"); + int CopyCostParsed = R->getValueAsInt("CopyCost"); Allocatable = R->getValueAsBit("isAllocatable"); AltOrderSelect = R->getValueAsString("AltOrderSelect"); int AllocationPriority = R->getValueAsInt("AllocationPriority"); @@ -757,6 +757,14 @@ CodeGenRegisterClass::CodeGenRegisterClass(CodeGenRegBank &RegBank, const BitsInit *TSF = R->getValueAsBitsInit("TSFlags"); for (auto [Idx, Bit] : enumerate(TSF->getBits())) TSFlags |= uint8_t(cast<BitInit>(Bit)->getValue()) << Idx; + + // Saturate negative costs to the maximum + if (CopyCostParsed < 0) + CopyCost = std::numeric_limits<uint8_t>::max(); + else if (!isUInt<8>(CopyCostParsed)) + PrintFatalError(R->getLoc(), "'CopyCost' must be an 8-bit value"); + + CopyCost = CopyCostParsed; } // Create an inferred register class that was missing from the .td files. @@ -849,17 +857,6 @@ unsigned CodeGenRegisterClass::getWeight(const CodeGenRegBank &RegBank) const { return (*Members.begin())->getWeight(RegBank); } -namespace llvm { - -raw_ostream &operator<<(raw_ostream &OS, const CodeGenRegisterClass::Key &K) { - OS << "{ " << K.RSI; - for (const auto R : *K.Members) - OS << ", " << R->getName(); - return OS << " }"; -} - -} // end namespace llvm - // This is a simple lexicographical order that can be used to search for sets. // It is not the same as the topological order provided by TopoOrderRC. 
bool CodeGenRegisterClass::Key::operator<(
diff --git a/llvm/utils/TableGen/Common/CodeGenRegisters.h b/llvm/utils/TableGen/Common/CodeGenRegisters.h
index 81aa663..89dac12 100644
--- a/llvm/utils/TableGen/Common/CodeGenRegisters.h
+++ b/llvm/utils/TableGen/Common/CodeGenRegisters.h
@@ -359,7 +359,7 @@ public:
   StringRef Namespace;
   SmallVector<ValueTypeByHwMode, 4> VTs;
   RegSizeInfoByHwMode RSI;
-  int CopyCost;
+  uint8_t CopyCost;
   bool Allocatable;
   StringRef AltOrderSelect;
   uint8_t AllocationPriority;
diff --git a/llvm/utils/TableGen/Common/InfoByHwMode.cpp b/llvm/utils/TableGen/Common/InfoByHwMode.cpp
index a6e2fc4..4c8197d 100644
--- a/llvm/utils/TableGen/Common/InfoByHwMode.cpp
+++ b/llvm/utils/TableGen/Common/InfoByHwMode.cpp
@@ -227,19 +227,17 @@ EncodingInfoByHwMode::EncodingInfoByHwMode(const Record *R,
   }
 }
 
-namespace llvm {
-raw_ostream &operator<<(raw_ostream &OS, const ValueTypeByHwMode &T) {
+raw_ostream &llvm::operator<<(raw_ostream &OS, const ValueTypeByHwMode &T) {
   T.writeToStream(OS);
   return OS;
 }
 
-raw_ostream &operator<<(raw_ostream &OS, const RegSizeInfo &T) {
+raw_ostream &llvm::operator<<(raw_ostream &OS, const RegSizeInfo &T) {
   T.writeToStream(OS);
   return OS;
 }
 
-raw_ostream &operator<<(raw_ostream &OS, const RegSizeInfoByHwMode &T) {
+raw_ostream &llvm::operator<<(raw_ostream &OS, const RegSizeInfoByHwMode &T) {
   T.writeToStream(OS);
   return OS;
 }
-} // namespace llvm
diff --git a/llvm/utils/TableGen/Common/PredicateExpander.cpp b/llvm/utils/TableGen/Common/PredicateExpander.cpp
index 09d9538..03252ed 100644
--- a/llvm/utils/TableGen/Common/PredicateExpander.cpp
+++ b/llvm/utils/TableGen/Common/PredicateExpander.cpp
@@ -14,7 +14,7 @@
 #include "CodeGenSchedule.h" // Definition of STIPredicateFunction.
 #include "llvm/TableGen/Record.h"
 
-namespace llvm {
+using namespace llvm;
 
 void PredicateExpander::expandTrue(raw_ostream &OS) { OS << "true"; }
 void PredicateExpander::expandFalse(raw_ostream &OS) { OS << "false"; }
@@ -553,5 +553,3 @@ void STIPredicateExpander::expandSTIPredicate(raw_ostream &OS,
     expandEpilogue(OS, Fn);
   }
 }
-
-} // namespace llvm
diff --git a/llvm/utils/TableGen/DXILEmitter.cpp b/llvm/utils/TableGen/DXILEmitter.cpp
index 09ce9f3..9471959 100644
--- a/llvm/utils/TableGen/DXILEmitter.cpp
+++ b/llvm/utils/TableGen/DXILEmitter.cpp
@@ -37,15 +37,6 @@ struct DXILIntrinsicSelect {
   SmallVector<const Record *> ArgSelectRecords;
 };
 
-static StringRef StripIntrinArgSelectTypePrefix(StringRef Type) {
-  StringRef Prefix = "IntrinArgSelect_";
-  if (!Type.starts_with(Prefix)) {
-    PrintFatalError("IntrinArgSelectType definintion must be prefixed with "
-                    "'IntrinArgSelect_'");
-  }
-  return Type.substr(Prefix.size());
-}
-
 struct DXILOperationDesc {
   std::string OpName; // name of DXIL operation
   int OpCode;         // ID of DXIL operation
@@ -66,6 +57,15 @@ struct DXILOperationDesc {
 };
 } // end anonymous namespace
 
+static StringRef stripIntrinArgSelectTypePrefix(StringRef Type) {
+  StringRef Prefix = "IntrinArgSelect_";
+  if (!Type.starts_with(Prefix)) {
+    PrintFatalError("IntrinArgSelectType definintion must be prefixed with "
+                    "'IntrinArgSelect_'");
+  }
+  return Type.substr(Prefix.size());
+}
+
 /// In-place sort TableGen records of class with a field
 ///           Version dxil_version
 /// in the ascending version order.
@@ -449,7 +449,7 @@ static void emitDXILIntrinsicMap(ArrayRef<DXILOperationDesc> Ops,
           ArgSelect->getValueAsDef("type")->getNameInitAsString();
       int Value = ArgSelect->getValueAsInt("value");
       OS << "(IntrinArgSelect{"
-         << "IntrinArgSelect::Type::" << StripIntrinArgSelectTypePrefix(Type)
+         << "IntrinArgSelect::Type::" << stripIntrinArgSelectTypePrefix(Type)
          << "," << Value << "}), ";
     }
     OS << ")\n";
@@ -466,7 +466,7 @@ static void emitDXILIntrinsicArgSelectTypes(const RecordKeeper &Records,
   OS << "#ifdef DXIL_OP_INTRINSIC_ARG_SELECT_TYPE\n";
   for (const Record *Records :
        Records.getAllDerivedDefinitions("IntrinArgSelectType")) {
-    StringRef StrippedName = StripIntrinArgSelectTypePrefix(Records->getName());
+    StringRef StrippedName = stripIntrinArgSelectTypePrefix(Records->getName());
     OS << "DXIL_OP_INTRINSIC_ARG_SELECT_TYPE(" << StrippedName << ")\n";
   }
   OS << "#undef DXIL_OP_INTRINSIC_ARG_SELECT_TYPE\n";
diff --git a/llvm/utils/TableGen/DecoderEmitter.cpp b/llvm/utils/TableGen/DecoderEmitter.cpp
index 961dc28..5d41b7d 100644
--- a/llvm/utils/TableGen/DecoderEmitter.cpp
+++ b/llvm/utils/TableGen/DecoderEmitter.cpp
@@ -194,10 +194,6 @@ private:
   void parseInstructionEncodings();
 };
 
-} // end anonymous namespace
-
-namespace {
-
 struct EncodingIsland {
   unsigned StartBit;
   unsigned NumBits;
diff --git a/llvm/utils/TableGen/ExegesisEmitter.cpp b/llvm/utils/TableGen/ExegesisEmitter.cpp
index 1b4b072..bd69919 100644
--- a/llvm/utils/TableGen/ExegesisEmitter.cpp
+++ b/llvm/utils/TableGen/ExegesisEmitter.cpp
@@ -58,6 +58,14 @@ private:
   const std::map<llvm::StringRef, unsigned> PfmCounterNameTable;
 };
 
+struct ValidationCounterInfo {
+  int64_t EventNumber;
+  StringRef EventName;
+  unsigned PfmCounterID;
+};
+
+} // namespace
+
 static std::map<llvm::StringRef, unsigned>
 collectPfmCounters(const RecordKeeper &Records) {
   std::map<llvm::StringRef, unsigned> PfmCounterNameTable;
@@ -106,14 +114,8 @@ ExegesisEmitter::ExegesisEmitter(const RecordKeeper &RK)
   Target = Targets[0]->getName().str();
 }
 
-struct ValidationCounterInfo {
-  int64_t EventNumber;
-  StringRef EventName;
-  unsigned PfmCounterID;
-};
-
-bool EventNumberLess(const ValidationCounterInfo &LHS,
-                     const ValidationCounterInfo &RHS) {
+static bool EventNumberLess(const ValidationCounterInfo &LHS,
+                            const ValidationCounterInfo &RHS) {
   return LHS.EventNumber < RHS.EventNumber;
 }
 
@@ -221,7 +223,7 @@ void ExegesisEmitter::emitPfmCounters(raw_ostream &OS) const {
     emitPfmCountersInfo(*Def, IssueCountersTableOffset, OS);
 
   OS << "\n";
-} // namespace
+}
 
 void ExegesisEmitter::emitPfmCountersLookupTable(raw_ostream &OS) const {
   std::vector<const Record *> Bindings =
@@ -249,7 +251,5 @@ void ExegesisEmitter::run(raw_ostream &OS) const {
   emitPfmCountersLookupTable(OS);
 }
 
-} // end anonymous namespace
-
 static TableGen::Emitter::OptClass<ExegesisEmitter>
     X("gen-exegesis", "Generate llvm-exegesis tables");
diff --git a/llvm/utils/TableGen/FastISelEmitter.cpp b/llvm/utils/TableGen/FastISelEmitter.cpp
index 694d89a..dba8bde 100644
--- a/llvm/utils/TableGen/FastISelEmitter.cpp
+++ b/llvm/utils/TableGen/FastISelEmitter.cpp
@@ -52,11 +52,9 @@ struct InstructionMemo {
   InstructionMemo(const InstructionMemo &Other) = delete;
   InstructionMemo(InstructionMemo &&Other) = default;
 };
-} // End anonymous namespace
 
 /// ImmPredicateSet - This uniques predicates (represented as a string) and
 /// gives them unique (small) integer ID's that start at 0.
-namespace {
 class ImmPredicateSet {
   DenseMap<TreePattern *, unsigned> ImmIDs;
   std::vector<TreePredicateFn> PredsByName;
@@ -77,12 +75,10 @@ public:
   iterator begin() const { return PredsByName.begin(); }
   iterator end() const { return PredsByName.end(); }
 };
-} // End anonymous namespace
 
 /// OperandsSignature - This class holds a description of a list of operand
 /// types. It has utility methods for emitting text based on the operands.
 ///
-namespace {
 struct OperandsSignature {
   class OpKind {
     enum { OK_Reg, OK_FP, OK_Imm, OK_Invalid = -1 };
@@ -366,9 +362,7 @@ struct OperandsSignature {
       Opnd.printManglingSuffix(OS, ImmPredicates, StripImmCodes);
   }
 };
-} // End anonymous namespace
 
-namespace {
 class FastISelMap {
   // A multimap is needed instead of a "plain" map because the key is
   // the instruction's complexity (an int) and they are not unique.
diff --git a/llvm/utils/TableGen/RegisterInfoEmitter.cpp b/llvm/utils/TableGen/RegisterInfoEmitter.cpp
index c4b1c5f5..a67a5a9 100644
--- a/llvm/utils/TableGen/RegisterInfoEmitter.cpp
+++ b/llvm/utils/TableGen/RegisterInfoEmitter.cpp
@@ -1083,14 +1083,13 @@ void RegisterInfoEmitter::runMCDesc(raw_ostream &OS) {
     std::string RCName = Order.empty() ? "nullptr" : RC.getName();
     std::string RCBitsName = Order.empty() ? "nullptr" : RC.getName() + "Bits";
     std::string RCBitsSize = Order.empty() ? "0" : "sizeof(" + RCBitsName + ")";
-    assert(isInt<8>(RC.CopyCost) && "Copy cost too large.");
     uint32_t RegSize = 0;
     if (RC.RSI.isSimple())
       RegSize = RC.RSI.getSimple().RegSize;
     OS << "  { " << RCName << ", " << RCBitsName << ", "
        << RegClassStrings.get(RC.getName()) << ", " << RC.getOrder().size()
        << ", " << RCBitsSize << ", " << RC.getQualifiedIdName() << ", "
-       << RegSize << ", " << RC.CopyCost << ", "
+       << RegSize << ", " << static_cast<unsigned>(RC.CopyCost) << ", "
        << (RC.Allocatable ? "true" : "false") << ", "
        << (RC.getBaseClassOrder() ? "true" : "false") << " },\n";
   }
diff --git a/llvm/utils/TableGen/X86DisassemblerShared.h b/llvm/utils/TableGen/X86DisassemblerShared.h
index f60fd47..d5f936d 100644
--- a/llvm/utils/TableGen/X86DisassemblerShared.h
+++ b/llvm/utils/TableGen/X86DisassemblerShared.h
@@ -14,6 +14,8 @@
 
 #include "llvm/Support/X86DisassemblerDecoderCommon.h"
 
+namespace llvm::X86Disassembler {
+
 struct InstructionSpecifier {
   llvm::X86Disassembler::OperandSpecifier
       operands[llvm::X86Disassembler::X86_MAX_OPERANDS];
@@ -52,4 +54,6 @@ struct ContextDecision {
   ContextDecision() { memset(opcodeDecisions, 0, sizeof(opcodeDecisions)); }
 };
 
+} // namespace llvm::X86Disassembler
+
 #endif
diff --git a/llvm/utils/TableGen/X86FoldTablesEmitter.cpp b/llvm/utils/TableGen/X86FoldTablesEmitter.cpp
index 1e1e4ab..6f523b5 100644
--- a/llvm/utils/TableGen/X86FoldTablesEmitter.cpp
+++ b/llvm/utils/TableGen/X86FoldTablesEmitter.cpp
@@ -30,22 +30,23 @@ struct ManualMapEntry {
   const char *MemInstStr;
   uint16_t Strategy;
 };
+} // namespace
 
 // List of instructions requiring explicitly aligned memory.
-const char *ExplicitAlign[] = {"MOVDQA",  "MOVAPS",  "MOVAPD",  "MOVNTPS",
-                               "MOVNTPD", "MOVNTDQ", "MOVNTDQA"};
+static constexpr const char *ExplicitAlign[] = {
+    "MOVDQA", "MOVAPS", "MOVAPD", "MOVNTPS", "MOVNTPD", "MOVNTDQ", "MOVNTDQA"};
 
 // List of instructions NOT requiring explicit memory alignment.
-const char *ExplicitUnalign[] = {"MOVDQU",    "MOVUPS",    "MOVUPD",
-                                 "PCMPESTRM", "PCMPESTRI", "PCMPISTRM",
-                                 "PCMPISTRI"};
+static constexpr const char *ExplicitUnalign[] = {
+    "MOVDQU",    "MOVUPS",    "MOVUPD",    "PCMPESTRM",
+    "PCMPESTRI", "PCMPISTRM", "PCMPISTRI"};
 
-const ManualMapEntry ManualMapSet[] = {
+static const ManualMapEntry ManualMapSet[] = {
 #define ENTRY(REG, MEM, FLAGS) {#REG, #MEM, FLAGS},
 #include "X86ManualFoldTables.def"
 };
 
-const std::set<StringRef> NoFoldSet = {
+static const std::set<StringRef> NoFoldSet = {
 #define NOFOLD(INSN) #INSN,
 #include "X86ManualFoldTables.def"
 };
@@ -62,6 +63,7 @@ static bool isExplicitUnalign(const CodeGenInstruction *Inst) {
   });
 }
 
+namespace {
 class X86FoldTablesEmitter {
   const RecordKeeper &Records;
   const CodeGenTarget Target;
@@ -230,6 +232,7 @@ private:
     OS << "};\n\n";
   }
 };
+} // namespace
 
 // Return true if one of the instruction's operands is a RST register class
 static bool hasRSTRegClass(const CodeGenInstruction *Inst) {
@@ -318,6 +321,7 @@ static bool isNOREXRegClass(const Record *Op) {
 
 // Function object - Operator() returns true if the given Reg instruction
 // matches the Mem instruction of this object.
+namespace {
 class IsMatch {
   const CodeGenInstruction *MemInst;
   const X86Disassembler::RecognizableInstrBase MemRI;
diff --git a/llvm/utils/TableGen/X86InstrMappingEmitter.cpp b/llvm/utils/TableGen/X86InstrMappingEmitter.cpp
index be5e2a7..2745ba7 100644
--- a/llvm/utils/TableGen/X86InstrMappingEmitter.cpp
+++ b/llvm/utils/TableGen/X86InstrMappingEmitter.cpp
@@ -66,6 +66,7 @@ private:
   void printTable(ArrayRef<Entry> Table, StringRef Name, StringRef Macro,
                   raw_ostream &OS);
 };
+} // namespace
 
 void X86InstrMappingEmitter::printClassDef(raw_ostream &OS) {
   OS << "struct X86TableEntry {\n"
@@ -106,6 +107,7 @@ void X86InstrMappingEmitter::printTable(ArrayRef<Entry> Table, StringRef Name,
 
   printMacroEnd(Macro, OS);
 }
+namespace {
 class IsMatch {
   const CodeGenInstruction *OldInst;
 
@@ -146,6 +148,7 @@ public:
     return true;
   }
 };
+} // namespace
 
 static bool isInteresting(const Record *Rec) {
   // _REV instruction should not appear before encoding optimization
@@ -368,7 +371,6 @@ void X86InstrMappingEmitter::run(raw_ostream &OS) {
   emitND2NonNDTable(Insts, OS);
   emitSSE2AVXTable(Insts, OS);
 }
-} // namespace
 
 static TableGen::Emitter::OptClass<X86InstrMappingEmitter>
     X("gen-x86-instr-mapping", "Generate X86 instruction mapping");
diff --git a/llvm/utils/TableGen/X86MnemonicTables.cpp b/llvm/utils/TableGen/X86MnemonicTables.cpp
index 85bd4df..7851919 100644
--- a/llvm/utils/TableGen/X86MnemonicTables.cpp
+++ b/llvm/utils/TableGen/X86MnemonicTables.cpp
@@ -30,6 +30,7 @@ public:
   // Output X86 mnemonic tables.
   void run(raw_ostream &OS);
 };
+} // namespace
 
 void X86MnemonicTablesEmitter::run(raw_ostream &OS) {
   emitSourceFileHeader("X86 Mnemonic tables", OS);
@@ -83,7 +84,5 @@ void X86MnemonicTablesEmitter::run(raw_ostream &OS) {
   OS << "} // end namespace X86\n} // end namespace llvm";
 }
 
-} // namespace
-
 static TableGen::Emitter::OptClass<X86MnemonicTablesEmitter>
     X("gen-x86-mnemonic-tables", "Generate X86 mnemonic tables");
diff --git a/llvm/utils/TableGen/X86ModRMFilters.h b/llvm/utils/TableGen/X86ModRMFilters.h
index b579f22..7bf111f 100644
--- a/llvm/utils/TableGen/X86ModRMFilters.h
+++ b/llvm/utils/TableGen/X86ModRMFilters.h
@@ -19,9 +19,7 @@
 
 #include <cstdint>
 
-namespace llvm {
-
-namespace X86Disassembler {
+namespace llvm::X86Disassembler {
 
 /// ModRMFilter - Abstract base class for clases that recognize patterns in
 /// ModR/M bytes.
@@ -135,8 +133,6 @@ public:
   bool accepts(uint8_t modRM) const override { return (ModRM == modRM); }
 };
 
-} // namespace X86Disassembler
-
-} // namespace llvm
+} // namespace llvm::X86Disassembler
 
 #endif
diff --git a/llvm/utils/TableGen/X86RecognizableInstr.h b/llvm/utils/TableGen/X86RecognizableInstr.h
index b74e74d..52f9538 100644
--- a/llvm/utils/TableGen/X86RecognizableInstr.h
+++ b/llvm/utils/TableGen/X86RecognizableInstr.h
@@ -22,8 +22,6 @@
 #include <string>
 #include <vector>
 
-struct InstructionSpecifier;
-
 namespace llvm {
 class Record;
 #define X86_INSTR_MRM_MAPPING \
@@ -179,6 +177,8 @@ enum { ExplicitREX2 = 1, ExplicitEVEX = 3 };
 namespace X86Disassembler {
 class DisassemblerTables;
 
+struct InstructionSpecifier;
+
 /// Extract common fields of a single X86 instruction from a CodeGenInstruction
 struct RecognizableInstrBase {
   /// The OpPrefix field from the record
diff --git a/llvm/utils/gn/secondary/bolt/lib/Rewrite/BUILD.gn b/llvm/utils/gn/secondary/bolt/lib/Rewrite/BUILD.gn
index b856d1c..764ebb9 100644
--- a/llvm/utils/gn/secondary/bolt/lib/Rewrite/BUILD.gn
+++ b/llvm/utils/gn/secondary/bolt/lib/Rewrite/BUILD.gn
@@ -28,6 +28,7 @@ static_library("Rewrite") {
     "BuildIDRewriter.cpp",
     "DWARFRewriter.cpp",
     "ExecutableFileMemoryManager.cpp",
+    "GNUPropertyRewriter.cpp",
    "JITLinkLinker.cpp",
     "LinuxKernelRewriter.cpp",
     "MachORewriteInstance.cpp",
diff --git a/llvm/utils/gn/secondary/llvm/unittests/Object/BUILD.gn b/llvm/utils/gn/secondary/llvm/unittests/Object/BUILD.gn
index 9fcb05c..54193c8 100644
--- a/llvm/utils/gn/secondary/llvm/unittests/Object/BUILD.gn
+++ b/llvm/utils/gn/secondary/llvm/unittests/Object/BUILD.gn
@@ -10,6 +10,7 @@ unittest("ObjectTests") {
   ]
   sources = [
     "ArchiveTest.cpp",
+    "BuildIDTest.cpp",
     "COFFObjectFileTest.cpp",
     "DXContainerTest.cpp",
     "ELFObjectFileTest.cpp",