Diffstat (limited to 'llvm')
213 files changed, 12169 insertions, 12248 deletions
diff --git a/llvm/docs/TestingGuide.rst b/llvm/docs/TestingGuide.rst index b6dda6a..76b6b4e 100644 --- a/llvm/docs/TestingGuide.rst +++ b/llvm/docs/TestingGuide.rst @@ -152,12 +152,12 @@ can run the LLVM and Clang tests simultaneously using: % make check-all -To run the tests with Valgrind (Memcheck by default), use the ``LIT_ARGS`` make +To run the tests with Valgrind (Memcheck by default), use the ``LIT_OPTS`` make variable to pass the required options to lit. For example, you can use: .. code-block:: bash - % make check LIT_ARGS="-v --vg --vg-leak" + % make check LIT_OPTS="-v --vg --vg-leak" to enable testing with valgrind and with leak checking enabled. diff --git a/llvm/include/llvm/Analysis/LoopAccessAnalysis.h b/llvm/include/llvm/Analysis/LoopAccessAnalysis.h index 73bfe1a..af6e534 100644 --- a/llvm/include/llvm/Analysis/LoopAccessAnalysis.h +++ b/llvm/include/llvm/Analysis/LoopAccessAnalysis.h @@ -236,8 +236,8 @@ public: /// In same cases when the dependency check fails we can still /// vectorize the loop with a dynamic array access check. - bool shouldRetryWithRuntimeCheck() const { - return FoundNonConstantDistanceDependence && + bool shouldRetryWithRuntimeChecks() const { + return ShouldRetryWithRuntimeChecks && Status == VectorizationSafetyStatus::PossiblySafeWithRtChecks; } @@ -327,9 +327,9 @@ private: uint64_t MaxStoreLoadForwardSafeDistanceInBits = std::numeric_limits<uint64_t>::max(); - /// If we see a non-constant dependence distance we can still try to - /// vectorize this loop with runtime checks. - bool FoundNonConstantDistanceDependence = false; + /// Whether we should try to vectorize the loop with runtime checks, if the + /// dependencies are not safe. + bool ShouldRetryWithRuntimeChecks = false; /// Result of the dependence checks, indicating whether the checked /// dependences are safe for vectorization, require RT checks or are known to diff --git a/llvm/include/llvm/BinaryFormat/ELF.h b/llvm/include/llvm/BinaryFormat/ELF.h index e4f82ad..ad35d7f 100644 --- a/llvm/include/llvm/BinaryFormat/ELF.h +++ b/llvm/include/llvm/BinaryFormat/ELF.h @@ -362,6 +362,7 @@ enum { ELFOSABI_FENIXOS = 16, // FenixOS ELFOSABI_CLOUDABI = 17, // Nuxi CloudABI ELFOSABI_CUDA = 51, // NVIDIA CUDA architecture. + ELFOSABI_CUDA_V2 = 41, // NVIDIA CUDA architecture. ELFOSABI_FIRST_ARCH = 64, // First architecture-specific OS ABI ELFOSABI_AMDGPU_HSA = 64, // AMD HSA runtime ELFOSABI_AMDGPU_PAL = 65, // AMD PAL runtime @@ -385,6 +386,12 @@ enum { ELFABIVERSION_AMDGPU_HSA_V6 = 4, }; +// CUDA OS ABI Version identification. +enum { + ELFABIVERSION_CUDA_V1 = 7, + ELFABIVERSION_CUDA_V2 = 8, +}; + #define ELF_RELOC(name, value) name = value, // X86_64 relocations. @@ -921,7 +928,7 @@ enum { // NVPTX specific e_flags. enum : unsigned { - // Processor selection mask for EF_CUDA_SM* values. + // Processor selection mask for EF_CUDA_SM* values prior to blackwell. EF_CUDA_SM = 0xff, // SM based processor values. @@ -954,12 +961,22 @@ enum : unsigned { // The target is using 64-bit addressing. EF_CUDA_64BIT_ADDRESS = 0x400, // Set when using the sm_90a processor. - EF_CUDA_ACCELERATORS = 0x800, + EF_CUDA_ACCELERATORS_V1 = 0x800, // Undocumented software feature. EF_CUDA_SW_FLAG_V2 = 0x1000, // Virtual processor selection mask for EF_CUDA_VIRTUAL_SM* values. EF_CUDA_VIRTUAL_SM = 0xff0000, + + // Processor selection mask for EF_CUDA_SM* values following blackwell. + EF_CUDA_SM_MASK = 0xff00, + + // SM based processor values. 
+ EF_CUDA_SM100 = 0x6400, + EF_CUDA_SM120 = 0x7800, + + // Set when using an accelerator variant like sm_100a. + EF_CUDA_ACCELERATORS = 0x8, }; // ELF Relocation types for BPF diff --git a/llvm/include/llvm/CodeGen/MachineScheduler.h b/llvm/include/llvm/CodeGen/MachineScheduler.h index e7a7091..efda7eb 100644 --- a/llvm/include/llvm/CodeGen/MachineScheduler.h +++ b/llvm/include/llvm/CodeGen/MachineScheduler.h @@ -65,7 +65,7 @@ // // void <SubTarget>Subtarget:: // overrideSchedPolicy(MachineSchedPolicy &Policy, -// unsigned NumRegionInstrs) const { +// const SchedRegion &Region) const { // Policy.<Flag> = true; // } // @@ -218,6 +218,22 @@ struct MachineSchedPolicy { MachineSchedPolicy() = default; }; +/// A region of an MBB for scheduling. +struct SchedRegion { + /// RegionBegin is the first instruction in the scheduling region, and + /// RegionEnd is either MBB->end() or the scheduling boundary after the + /// last instruction in the scheduling region. These iterators cannot refer + /// to instructions outside of the identified scheduling region because + /// those may be reordered before scheduling this region. + MachineBasicBlock::iterator RegionBegin; + MachineBasicBlock::iterator RegionEnd; + unsigned NumRegionInstrs; + + SchedRegion(MachineBasicBlock::iterator B, MachineBasicBlock::iterator E, + unsigned N) + : RegionBegin(B), RegionEnd(E), NumRegionInstrs(N) {} +}; + /// MachineSchedStrategy - Interface to the scheduling algorithm used by /// ScheduleDAGMI. /// diff --git a/llvm/include/llvm/CodeGen/SelectionDAG.h b/llvm/include/llvm/CodeGen/SelectionDAG.h index 657951d..eac8e14 100644 --- a/llvm/include/llvm/CodeGen/SelectionDAG.h +++ b/llvm/include/llvm/CodeGen/SelectionDAG.h @@ -1202,13 +1202,16 @@ public: LLVM_ABI SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef<SDValue> Ops, const SDNodeFlags Flags); LLVM_ABI SDValue getNode(unsigned Opcode, const SDLoc &DL, - ArrayRef<EVT> ResultTys, ArrayRef<SDValue> Ops); + ArrayRef<EVT> ResultTys, ArrayRef<SDValue> Ops, + const SDNodeFlags Flags); LLVM_ABI SDValue getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, ArrayRef<SDValue> Ops, const SDNodeFlags Flags); // Use flags from current flag inserter. LLVM_ABI SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef<SDValue> Ops); + LLVM_ABI SDValue getNode(unsigned Opcode, const SDLoc &DL, + ArrayRef<EVT> ResultTys, ArrayRef<SDValue> Ops); LLVM_ABI SDValue getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, ArrayRef<SDValue> Ops); LLVM_ABI SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, @@ -1346,9 +1349,10 @@ public: /// Helper function to make it easier to build SelectCC's if you just have an /// ISD::CondCode instead of an SDValue. SDValue getSelectCC(const SDLoc &DL, SDValue LHS, SDValue RHS, SDValue True, - SDValue False, ISD::CondCode Cond) { + SDValue False, ISD::CondCode Cond, + SDNodeFlags Flags = SDNodeFlags()) { return getNode(ISD::SELECT_CC, DL, True.getValueType(), LHS, RHS, True, - False, getCondCode(Cond)); + False, getCondCode(Cond), Flags); } /// Try to simplify a select/vselect into 1 of its operands or a constant. @@ -1425,10 +1429,9 @@ public: /// Creates a LifetimeSDNode that starts (`IsStart==true`) or ends /// (`IsStart==false`) the lifetime of the portion of `FrameIndex` between - /// offsets `Offset` and `Offset + Size`. + /// offsets `0` and `Size`. 
LLVM_ABI SDValue getLifetimeNode(bool IsStart, const SDLoc &dl, SDValue Chain, - int FrameIndex, int64_t Size, - int64_t Offset = -1); + int FrameIndex, int64_t Size); /// Creates a PseudoProbeSDNode with function GUID `Guid` and /// the index of the block `Index` it is probing, as well as the attributes diff --git a/llvm/include/llvm/CodeGen/SelectionDAGNodes.h b/llvm/include/llvm/CodeGen/SelectionDAGNodes.h index 5d9937f..8e9c1f7 100644 --- a/llvm/include/llvm/CodeGen/SelectionDAGNodes.h +++ b/llvm/include/llvm/CodeGen/SelectionDAGNodes.h @@ -2004,25 +2004,17 @@ public: class LifetimeSDNode : public SDNode { friend class SelectionDAG; int64_t Size; - int64_t Offset; // -1 if offset is unknown. LifetimeSDNode(unsigned Opcode, unsigned Order, const DebugLoc &dl, - SDVTList VTs, int64_t Size, int64_t Offset) - : SDNode(Opcode, Order, dl, VTs), Size(Size), Offset(Offset) {} + SDVTList VTs, int64_t Size) + : SDNode(Opcode, Order, dl, VTs), Size(Size) {} + public: int64_t getFrameIndex() const { return cast<FrameIndexSDNode>(getOperand(1))->getIndex(); } - bool hasOffset() const { return Offset >= 0; } - int64_t getOffset() const { - assert(hasOffset() && "offset is unknown"); - return Offset; - } - int64_t getSize() const { - assert(hasOffset() && "offset is unknown"); - return Size; - } + int64_t getSize() const { return Size; } // Methods to support isa and dyn_cast static bool classof(const SDNode *N) { diff --git a/llvm/include/llvm/CodeGen/TargetSubtargetInfo.h b/llvm/include/llvm/CodeGen/TargetSubtargetInfo.h index 45e67d8..a8c7a8a 100644 --- a/llvm/include/llvm/CodeGen/TargetSubtargetInfo.h +++ b/llvm/include/llvm/CodeGen/TargetSubtargetInfo.h @@ -54,6 +54,7 @@ class TargetRegisterClass; class TargetRegisterInfo; class TargetSchedModel; class Triple; +struct SchedRegion; //===----------------------------------------------------------------------===// /// @@ -231,7 +232,7 @@ public: /// scheduling heuristics (no custom MachineSchedStrategy) to make /// changes to the generic scheduling policy. virtual void overrideSchedPolicy(MachineSchedPolicy &Policy, - unsigned NumRegionInstrs) const {} + const SchedRegion &Region) const {} /// Override generic post-ra scheduling policy within a region. /// @@ -241,7 +242,7 @@ public: /// Note that some options like tracking register pressure won't take effect /// in post-ra scheduling. virtual void overridePostRASchedPolicy(MachineSchedPolicy &Policy, - unsigned NumRegionInstrs) const {} + const SchedRegion &Region) const {} // Perform target-specific adjustments to the latency of a schedule // dependency. diff --git a/llvm/include/llvm/Frontend/OpenMP/ClauseT.h b/llvm/include/llvm/Frontend/OpenMP/ClauseT.h index de888ff..7919f7a 100644 --- a/llvm/include/llvm/Frontend/OpenMP/ClauseT.h +++ b/llvm/include/llvm/Frontend/OpenMP/ClauseT.h @@ -779,16 +779,17 @@ struct LinkT { template <typename T, typename I, typename E> // struct MapT { using LocatorList = ObjectListT<I, E>; - ENUM(MapType, To, From, Tofrom, Alloc, Release, Delete); - ENUM(MapTypeModifier, Always, Close, Present, OmpxHold); + ENUM(MapType, To, From, Tofrom, Storage); + ENUM(MapTypeModifier, Always, Close, Delete, Present, Self, OmpxHold); + ENUM(RefModifier, RefPtee, RefPtr, RefPtrPtee); // See note at the definition of the MapperT type. 
using Mappers = ListT<type::MapperT<I, E>>; // Not a spec name using Iterator = type::IteratorT<T, I, E>; using MapTypeModifiers = ListT<MapTypeModifier>; // Not a spec name using TupleTrait = std::true_type; - std::tuple<OPT(MapType), OPT(MapTypeModifiers), OPT(Mappers), OPT(Iterator), - LocatorList> + std::tuple<OPT(MapType), OPT(MapTypeModifiers), OPT(RefModifier), + OPT(Mappers), OPT(Iterator), LocatorList> t; }; diff --git a/llvm/include/llvm/Frontend/OpenMP/ConstructDecompositionT.h b/llvm/include/llvm/Frontend/OpenMP/ConstructDecompositionT.h index 611bfe3..047baa3 100644 --- a/llvm/include/llvm/Frontend/OpenMP/ConstructDecompositionT.h +++ b/llvm/include/llvm/Frontend/OpenMP/ConstructDecompositionT.h @@ -708,6 +708,7 @@ bool ConstructDecompositionT<C, H>::applyClause( tomp::clause::MapT<TypeTy, IdTy, ExprTy>{ {/*MapType=*/MapType::Tofrom, /*MapTypeModifier=*/std::nullopt, + /*RefModifier=*/std::nullopt, /*Mapper=*/std::nullopt, /*Iterator=*/std::nullopt, /*LocatorList=*/std::move(tofrom)}}); dirTarget->clauses.push_back(map); @@ -969,8 +970,8 @@ bool ConstructDecompositionT<C, H>::applyClause( llvm::omp::Clause::OMPC_map, tomp::clause::MapT<TypeTy, IdTy, ExprTy>{ {/*MapType=*/MapType::Tofrom, /*MapTypeModifier=*/std::nullopt, - /*Mapper=*/std::nullopt, /*Iterator=*/std::nullopt, - /*LocatorList=*/std::move(tofrom)}}); + /*RefModifier=*/std::nullopt, /*Mapper=*/std::nullopt, + /*Iterator=*/std::nullopt, /*LocatorList=*/std::move(tofrom)}}); dirTarget->clauses.push_back(map); applied = true; diff --git a/llvm/include/llvm/IR/PassInstrumentation.h b/llvm/include/llvm/IR/PassInstrumentation.h index 0315715..33eda5a 100644 --- a/llvm/include/llvm/IR/PassInstrumentation.h +++ b/llvm/include/llvm/IR/PassInstrumentation.h @@ -164,7 +164,7 @@ public: /// Add a class name to pass name mapping for use by pass instrumentation. LLVM_ABI void addClassToPassName(StringRef ClassName, StringRef PassName); - /// Get the pass name for a given pass class name. + /// Get the pass name for a given pass class name. Empty if no match found. LLVM_ABI StringRef getPassNameForClassName(StringRef ClassName); private: diff --git a/llvm/include/llvm/Object/ELFObjectFile.h b/llvm/include/llvm/Object/ELFObjectFile.h index a3aa0d9..ced1afd 100644 --- a/llvm/include/llvm/Object/ELFObjectFile.h +++ b/llvm/include/llvm/Object/ELFObjectFile.h @@ -1479,6 +1479,7 @@ template <class ELFT> Triple::OSType ELFObjectFile<ELFT>::getOS() const { case ELF::ELFOSABI_OPENBSD: return Triple::OpenBSD; case ELF::ELFOSABI_CUDA: + case ELF::ELFOSABI_CUDA_V2: return Triple::CUDA; case ELF::ELFOSABI_AMDGPU_HSA: return Triple::AMDHSA; diff --git a/llvm/lib/Analysis/LoopAccessAnalysis.cpp b/llvm/lib/Analysis/LoopAccessAnalysis.cpp index f3a32d3..14be385 100644 --- a/llvm/lib/Analysis/LoopAccessAnalysis.cpp +++ b/llvm/lib/Analysis/LoopAccessAnalysis.cpp @@ -589,11 +589,11 @@ void RuntimePointerChecking::groupChecks( // dependence. Not grouping the checks for a[i] and a[i + 9000] allows // us to perform an accurate check in this case. // - // The above case requires that we have an UnknownDependence between - // accesses to the same underlying object. This cannot happen unless - // FoundNonConstantDistanceDependence is set, and therefore UseDependencies - // is also false. In this case we will use the fallback path and create - // separate checking groups for all pointers. 
+ // In the above case, we have a non-constant distance and an Unknown + // dependence between accesses to the same underlying object, and could retry + // with runtime checks. Therefore UseDependencies is false. In this case we + // will use the fallback path and create separate checking groups for all + // pointers. // If we don't have the dependency partitions, construct a new // checking pointer group for each pointer. This is also required @@ -819,7 +819,7 @@ public: /// perform dependency checking. /// /// Note that this can later be cleared if we retry memcheck analysis without - /// dependency checking (i.e. FoundNonConstantDistanceDependence). + /// dependency checking (i.e. ShouldRetryWithRuntimeChecks). bool isDependencyCheckNeeded() const { return !CheckDeps.empty(); } /// We decided that no dependence analysis would be used. Reset the state. @@ -896,7 +896,7 @@ private: /// /// Note that, this is different from isDependencyCheckNeeded. When we retry /// memcheck analysis without dependency checking - /// (i.e. FoundNonConstantDistanceDependence), isDependencyCheckNeeded is + /// (i.e. ShouldRetryWithRuntimeChecks), isDependencyCheckNeeded is /// cleared while this remains set if we have potentially dependent accesses. bool IsRTCheckAnalysisNeeded = false; @@ -2079,11 +2079,10 @@ MemoryDepChecker::getDependenceDistanceStrideAndSize( if (StrideAScaled == StrideBScaled) CommonStride = StrideAScaled; - // TODO: FoundNonConstantDistanceDependence is used as a necessary condition - // to consider retrying with runtime checks. Historically, we did not set it - // when (unscaled) strides were different but there is no inherent reason to. + // TODO: Historically, we didn't retry with runtime checks when (unscaled) + // strides were different but there is no inherent reason to. if (!isa<SCEVConstant>(Dist)) - FoundNonConstantDistanceDependence |= StrideAPtrInt == StrideBPtrInt; + ShouldRetryWithRuntimeChecks |= StrideAPtrInt == StrideBPtrInt; // If distance is a SCEVCouldNotCompute, return Unknown immediately. if (isa<SCEVCouldNotCompute>(Dist)) { @@ -2712,7 +2711,7 @@ bool LoopAccessInfo::analyzeLoop(AAResults *AA, const LoopInfo *LI, DepsAreSafe = DepChecker->areDepsSafe(DepCands, Accesses.getDependenciesToCheck()); - if (!DepsAreSafe && DepChecker->shouldRetryWithRuntimeCheck()) { + if (!DepsAreSafe && DepChecker->shouldRetryWithRuntimeChecks()) { LLVM_DEBUG(dbgs() << "LAA: Retrying with memory checks\n"); // Clear the dependency checks. We assume they are not needed. 
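[Editor's sketch] The LoopAccessAnalysis hunks above rename FoundNonConstantDistanceDependence to ShouldRetryWithRuntimeChecks (and the query to shouldRetryWithRuntimeChecks()). A minimal self-contained model of the flag's semantics follows; the struct and enum are toy stand-ins for MemoryDepChecker, not the real LLVM class:

    #include <cassert>

    // Toy stand-ins for MemoryDepChecker's renamed members; this models the
    // semantics of ShouldRetryWithRuntimeChecks, not the real LLVM class.
    enum class SafetyStatus { Safe, PossiblySafeWithRtChecks, Unsafe };

    struct DepCheckerModel {
      bool ShouldRetryWithRuntimeChecks = false;
      SafetyStatus Status = SafetyStatus::Safe;

      // Called when a dependence distance is not a compile-time constant but
      // the two accesses have matching pointer strides (the TODO in the hunk
      // above notes that differing strides could be allowed too).
      void noteNonConstantDistance() {
        ShouldRetryWithRuntimeChecks = true;
        Status = SafetyStatus::PossiblySafeWithRtChecks;
      }

      // Mirrors shouldRetryWithRuntimeChecks(): retry only when the failure
      // is one that runtime overlap checks can actually resolve.
      bool shouldRetryWithRuntimeChecks() const {
        return ShouldRetryWithRuntimeChecks &&
               Status == SafetyStatus::PossiblySafeWithRtChecks;
      }
    };

    int main() {
      DepCheckerModel DC;
      assert(!DC.shouldRetryWithRuntimeChecks());
      DC.noteNonConstantDistance();
      assert(DC.shouldRetryWithRuntimeChecks());
    }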
diff --git a/llvm/lib/AsmParser/LLParser.cpp b/llvm/lib/AsmParser/LLParser.cpp index 0027775..13bef1f 100644 --- a/llvm/lib/AsmParser/LLParser.cpp +++ b/llvm/lib/AsmParser/LLParser.cpp @@ -4786,9 +4786,13 @@ struct MDField : public MDFieldImpl<Metadata *> { }; struct MDStringField : public MDFieldImpl<MDString *> { - bool AllowEmpty; - MDStringField(bool AllowEmpty = true) - : ImplTy(nullptr), AllowEmpty(AllowEmpty) {} + enum class EmptyIs { + Null, //< Allow empty input string, map to nullptr + Empty, //< Allow empty input string, map to an empty MDString + Error, //< Disallow empty string, map to an error + } EmptyIs; + MDStringField(enum EmptyIs EmptyIs = EmptyIs::Null) + : ImplTy(nullptr), EmptyIs(EmptyIs) {} }; struct MDFieldList : public MDFieldImpl<SmallVector<Metadata *, 4>> { @@ -5260,10 +5264,19 @@ bool LLParser::parseMDField(LocTy Loc, StringRef Name, MDStringField &Result) { if (parseStringConstant(S)) return true; - if (!Result.AllowEmpty && S.empty()) - return error(ValueLoc, "'" + Name + "' cannot be empty"); + if (S.empty()) { + switch (Result.EmptyIs) { + case MDStringField::EmptyIs::Null: + Result.assign(nullptr); + return false; + case MDStringField::EmptyIs::Empty: + break; + case MDStringField::EmptyIs::Error: + return error(ValueLoc, "'" + Name + "' cannot be empty"); + } + } - Result.assign(S.empty() ? nullptr : MDString::get(Context, S)); + Result.assign(MDString::get(Context, S)); return false; } @@ -5781,7 +5794,7 @@ bool LLParser::parseDIFile(MDNode *&Result, bool IsDistinct) { REQUIRED(directory, MDStringField, ); \ OPTIONAL(checksumkind, ChecksumKindField, (DIFile::CSK_MD5)); \ OPTIONAL(checksum, MDStringField, ); \ - OPTIONAL(source, MDStringField, ); + OPTIONAL(source, MDStringField, (MDStringField::EmptyIs::Empty)); PARSE_MD_FIELDS(); #undef VISIT_MD_FIELDS @@ -6065,7 +6078,7 @@ bool LLParser::parseDITemplateValueParameter(MDNode *&Result, bool IsDistinct) { /// declaration: !4, align: 8) bool LLParser::parseDIGlobalVariable(MDNode *&Result, bool IsDistinct) { #define VISIT_MD_FIELDS(OPTIONAL, REQUIRED) \ - OPTIONAL(name, MDStringField, (/* AllowEmpty */ false)); \ + OPTIONAL(name, MDStringField, (MDStringField::EmptyIs::Error)); \ OPTIONAL(scope, MDField, ); \ OPTIONAL(linkageName, MDStringField, ); \ OPTIONAL(file, MDField, ); \ diff --git a/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp b/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp index 10bdb81..f1d3e96 100644 --- a/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp +++ b/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp @@ -1868,6 +1868,7 @@ void AsmPrinter::emitFunctionBody() { OutStreamer->emitLabel(MI.getOperand(0).getMCSymbol()); break; case TargetOpcode::EH_LABEL: + OutStreamer->AddComment("EH_LABEL"); OutStreamer->emitLabel(MI.getOperand(0).getMCSymbol()); // For AsynchEH, insert a Nop if followed by a trap inst // Or the exception won't be caught. 
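[Editor's sketch] The LLParser change above replaces MDStringField's boolean AllowEmpty with a three-way EmptyIs policy, so that DIFile's source: field can round-trip an explicitly empty string while name: still rejects one. A small model of that policy, using std::optional in place of a nullable MDString* (names here are illustrative, not the parser's API):

    #include <cassert>
    #include <optional>
    #include <stdexcept>
    #include <string>

    // Toy model of MDStringField::EmptyIs from the hunk above; std::nullopt
    // stands in for a null MDString*.
    enum class EmptyIs { Null, Empty, Error };

    std::optional<std::string> parseMDString(const std::string &S,
                                             EmptyIs Policy) {
      if (S.empty()) {
        switch (Policy) {
        case EmptyIs::Null:
          return std::nullopt;          // empty input -> no string at all
        case EmptyIs::Empty:
          break;                        // empty input -> "" is preserved
        case EmptyIs::Error:
          throw std::runtime_error("field cannot be empty");
        }
      }
      return S;
    }

    int main() {
      assert(!parseMDString("", EmptyIs::Null).has_value());
      assert(parseMDString("", EmptyIs::Empty)->empty()); // DIFile source: ""
      assert(*parseMDString("x", EmptyIs::Null) == "x");
    }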
diff --git a/llvm/lib/CodeGen/AsmPrinter/WinException.cpp b/llvm/lib/CodeGen/AsmPrinter/WinException.cpp index dccd71f..13fd270 100644 --- a/llvm/lib/CodeGen/AsmPrinter/WinException.cpp +++ b/llvm/lib/CodeGen/AsmPrinter/WinException.cpp @@ -323,12 +323,6 @@ const MCExpr *WinException::getLabel(const MCSymbol *Label) { Asm->OutContext); } -const MCExpr *WinException::getLabelPlusOne(const MCSymbol *Label) { - return MCBinaryExpr::createAdd(getLabel(Label), - MCConstantExpr::create(1, Asm->OutContext), - Asm->OutContext); -} - const MCExpr *WinException::getOffset(const MCSymbol *OffsetOf, const MCSymbol *OffsetFrom) { return MCBinaryExpr::createSub( @@ -655,7 +649,7 @@ void WinException::emitSEHActionsForRange(const WinEHFuncInfo &FuncInfo, AddComment("LabelStart"); OS.emitValue(getLabel(BeginLabel), 4); AddComment("LabelEnd"); - OS.emitValue(getLabelPlusOne(EndLabel), 4); + OS.emitValue(getLabel(EndLabel), 4); AddComment(UME.IsFinally ? "FinallyFunclet" : UME.Filter ? "FilterFunction" : "CatchAll"); OS.emitValue(FilterOrFinally, 4); @@ -950,13 +944,7 @@ void WinException::computeIP2StateTable( if (!ChangeLabel) ChangeLabel = StateChange.PreviousEndLabel; // Emit an entry indicating that PCs after 'Label' have this EH state. - // NOTE: On ARM architectures, the StateFromIp automatically takes into - // account that the return address is after the call instruction (whose EH - // state we should be using), but on other platforms we need to +1 to the - // label so that we are using the correct EH state. - const MCExpr *LabelExpression = (isAArch64 || isThumb) - ? getLabel(ChangeLabel) - : getLabelPlusOne(ChangeLabel); + const MCExpr *LabelExpression = getLabel(ChangeLabel); IPToStateTable.push_back( std::make_pair(LabelExpression, StateChange.NewState)); // FIXME: assert that NewState is between CatchLow and CatchHigh. diff --git a/llvm/lib/CodeGen/AsmPrinter/WinException.h b/llvm/lib/CodeGen/AsmPrinter/WinException.h index 638589a..47dd30c 100644 --- a/llvm/lib/CodeGen/AsmPrinter/WinException.h +++ b/llvm/lib/CodeGen/AsmPrinter/WinException.h @@ -80,7 +80,6 @@ class LLVM_LIBRARY_VISIBILITY WinException : public EHStreamer { const MCExpr *create32bitRef(const MCSymbol *Value); const MCExpr *create32bitRef(const GlobalValue *GV); const MCExpr *getLabel(const MCSymbol *Label); - const MCExpr *getLabelPlusOne(const MCSymbol *Label); const MCExpr *getOffset(const MCSymbol *OffsetOf, const MCSymbol *OffsetFrom); const MCExpr *getOffsetPlusOne(const MCSymbol *OffsetOf, const MCSymbol *OffsetFrom); diff --git a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp index d7280ea..dc5dfab 100644 --- a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp +++ b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp @@ -2189,23 +2189,11 @@ bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID, unsigned Op = ID == Intrinsic::lifetime_start ? TargetOpcode::LIFETIME_START : TargetOpcode::LIFETIME_END; - // Get the underlying objects for the location passed on the lifetime - // marker. - SmallVector<const Value *, 4> Allocas; - getUnderlyingObjects(CI.getArgOperand(1), Allocas); - - // Iterate over each underlying object, creating lifetime markers for each - // static alloca. Quit if we find a non-static alloca. 
- for (const Value *V : Allocas) { - const AllocaInst *AI = dyn_cast<AllocaInst>(V); - if (!AI) - continue; - - if (!AI->isStaticAlloca()) - return true; + const AllocaInst *AI = cast<AllocaInst>(CI.getArgOperand(1)); + if (!AI->isStaticAlloca()) + return true; - MIRBuilder.buildInstr(Op).addFrameIndex(getOrCreateFrameIndex(*AI)); - } + MIRBuilder.buildInstr(Op).addFrameIndex(getOrCreateFrameIndex(*AI)); return true; } case Intrinsic::fake_use: { diff --git a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp index 11b3ac8..ed7b07f 100644 --- a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp +++ b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp @@ -10120,14 +10120,10 @@ LegalizerHelper::lowerMemCpyFamily(MachineInstr &MI, unsigned MaxLen) { return Legalized; } - bool IsVolatile = MemOp->isVolatile(); - // Don't try to optimize volatile. - if (IsVolatile) - return UnableToLegalize; - if (MaxLen && KnownLen > MaxLen) return UnableToLegalize; + bool IsVolatile = MemOp->isVolatile(); if (Opc == TargetOpcode::G_MEMCPY) { auto &MF = *MI.getParent()->getParent(); const auto &TLI = *MF.getSubtarget().getTargetLowering(); diff --git a/llvm/lib/CodeGen/InterleavedAccessPass.cpp b/llvm/lib/CodeGen/InterleavedAccessPass.cpp index 525ef32..df162fc 100644 --- a/llvm/lib/CodeGen/InterleavedAccessPass.cpp +++ b/llvm/lib/CodeGen/InterleavedAccessPass.cpp @@ -600,8 +600,8 @@ static Value *getMask(Value *WideMask, unsigned Factor, bool InterleavedAccessImpl::lowerDeinterleaveIntrinsic( IntrinsicInst *DI, SmallSetVector<Instruction *, 32> &DeadInsts) { - Value *LoadedVal = DI->getOperand(0); - if (!LoadedVal->hasOneUse()) + Instruction *LoadedVal = dyn_cast<Instruction>(DI->getOperand(0)); + if (!LoadedVal || !LoadedVal->hasOneUse()) return false; auto *LI = dyn_cast<LoadInst>(LoadedVal); @@ -645,58 +645,68 @@ bool InterleavedAccessImpl::lowerDeinterleaveIntrinsic( } // Try and match this with target specific intrinsics. - if (!TLI->lowerDeinterleaveIntrinsicToLoad(cast<Instruction>(LoadedVal), Mask, - DI)) + if (!TLI->lowerDeinterleaveIntrinsicToLoad(LoadedVal, Mask, DI)) return false; DeadInsts.insert(DI); // We now have a target-specific load, so delete the old one. - DeadInsts.insert(cast<Instruction>(LoadedVal)); + DeadInsts.insert(LoadedVal); return true; } bool InterleavedAccessImpl::lowerInterleaveIntrinsic( - IntrinsicInst *II, SmallSetVector<Instruction *, 32> &DeadInsts) { - if (!II->hasOneUse()) + IntrinsicInst *IntII, SmallSetVector<Instruction *, 32> &DeadInsts) { + if (!IntII->hasOneUse()) return false; - Value *StoredBy = II->user_back(); - if (!isa<StoreInst, VPIntrinsic>(StoredBy)) + Instruction *StoredBy = dyn_cast<Instruction>(IntII->user_back()); + if (!StoredBy) + return false; + auto *SI = dyn_cast<StoreInst>(StoredBy); + auto *II = dyn_cast<IntrinsicInst>(StoredBy); + if (!SI && !II) return false; - SmallVector<Value *, 8> InterleaveValues(II->args()); - const unsigned Factor = getInterleaveIntrinsicFactor(II->getIntrinsicID()); + SmallVector<Value *, 8> InterleaveValues(IntII->args()); + const unsigned Factor = getInterleaveIntrinsicFactor(IntII->getIntrinsicID()); assert(Factor && "unexpected interleave intrinsic"); Value *Mask = nullptr; - if (auto *VPStore = dyn_cast<VPIntrinsic>(StoredBy)) { - if (VPStore->getIntrinsicID() != Intrinsic::vp_store) + if (II) { + // Check mask operand. Handle both all-true/false and interleaved mask. 
+ Value *WideMask; + switch (II->getIntrinsicID()) { + default: return false; - - Value *WideMask = VPStore->getOperand(2); + case Intrinsic::vp_store: + WideMask = II->getOperand(2); + break; + case Intrinsic::masked_store: + WideMask = II->getOperand(3); + break; + } Mask = getMask(WideMask, Factor, cast<VectorType>(InterleaveValues[0]->getType())); if (!Mask) return false; - LLVM_DEBUG(dbgs() << "IA: Found a vp.store with interleave intrinsic " - << *II << " and factor = " << Factor << "\n"); + LLVM_DEBUG(dbgs() << "IA: Found a vp.store or masked.store with interleave" + << " intrinsic " << *IntII << " and factor = " + << Factor << "\n"); } else { - auto *SI = cast<StoreInst>(StoredBy); if (!SI->isSimple()) return false; - LLVM_DEBUG(dbgs() << "IA: Found a store with interleave intrinsic " << *II - << " and factor = " << Factor << "\n"); + LLVM_DEBUG(dbgs() << "IA: Found a store with interleave intrinsic " + << *IntII << " and factor = " << Factor << "\n"); } // Try and match this with target specific intrinsics. - if (!TLI->lowerInterleaveIntrinsicToStore(cast<Instruction>(StoredBy), Mask, - InterleaveValues)) + if (!TLI->lowerInterleaveIntrinsicToStore(StoredBy, Mask, InterleaveValues)) return false; // We now have a target-specific store, so delete the old one. - DeadInsts.insert(cast<Instruction>(StoredBy)); - DeadInsts.insert(II); + DeadInsts.insert(StoredBy); + DeadInsts.insert(IntII); return true; } diff --git a/llvm/lib/CodeGen/MachinePipeliner.cpp b/llvm/lib/CodeGen/MachinePipeliner.cpp index b38a4d1c..90005bd 100644 --- a/llvm/lib/CodeGen/MachinePipeliner.cpp +++ b/llvm/lib/CodeGen/MachinePipeliner.cpp @@ -4279,8 +4279,8 @@ void LoopCarriedEdges::modifySUnits(std::vector<SUnit> &SUnits, !TII->isGlobalMemoryObject(FromMI) && !TII->isGlobalMemoryObject(ToMI) && !isSuccOrder(From, To)) { SDep Pred = Dep; - Pred.setSUnit(Src); - Dst->addPred(Pred); + Pred.setSUnit(From); + To->addPred(Pred); } } } diff --git a/llvm/lib/CodeGen/MachineScheduler.cpp b/llvm/lib/CodeGen/MachineScheduler.cpp index 76cba29..9d5c39c 100644 --- a/llvm/lib/CodeGen/MachineScheduler.cpp +++ b/llvm/lib/CodeGen/MachineScheduler.cpp @@ -771,24 +771,6 @@ static bool isSchedBoundary(MachineBasicBlock::iterator MI, MI->isFakeUse(); } -/// A region of an MBB for scheduling. -namespace { -struct SchedRegion { - /// RegionBegin is the first instruction in the scheduling region, and - /// RegionEnd is either MBB->end() or the scheduling boundary after the - /// last instruction in the scheduling region. These iterators cannot refer - /// to instructions outside of the identified scheduling region because - /// those may be reordered before scheduling this region. - MachineBasicBlock::iterator RegionBegin; - MachineBasicBlock::iterator RegionEnd; - unsigned NumRegionInstrs; - - SchedRegion(MachineBasicBlock::iterator B, MachineBasicBlock::iterator E, - unsigned N) : - RegionBegin(B), RegionEnd(E), NumRegionInstrs(N) {} -}; -} // end anonymous namespace - using MBBRegionsVector = SmallVector<SchedRegion, 16>; static void @@ -3725,7 +3707,8 @@ void GenericScheduler::initPolicy(MachineBasicBlock::iterator Begin, RegionPolicy.OnlyBottomUp = true; // Allow the subtarget to override default policy. - MF.getSubtarget().overrideSchedPolicy(RegionPolicy, NumRegionInstrs); + SchedRegion Region(Begin, End, NumRegionInstrs); + MF.getSubtarget().overrideSchedPolicy(RegionPolicy, Region); // After subtarget overrides, apply command line options. 
if (!EnableRegPressure) { @@ -4338,7 +4321,8 @@ void PostGenericScheduler::initPolicy(MachineBasicBlock::iterator Begin, RegionPolicy.OnlyBottomUp = false; // Allow the subtarget to override default policy. - MF.getSubtarget().overridePostRASchedPolicy(RegionPolicy, NumRegionInstrs); + SchedRegion Region(Begin, End, NumRegionInstrs); + MF.getSubtarget().overridePostRASchedPolicy(RegionPolicy, Region); // After subtarget overrides, apply command line options. if (PostRADirection == MISched::TopDown) { diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp index fed5e72..d3df434 100644 --- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp @@ -12375,11 +12375,8 @@ SDValue DAGCombiner::visitSELECT(SDNode *N) { TLI.isOperationLegalOrCustom(ISD::SELECT_CC, VT))) { // Any flags available in a select/setcc fold will be on the setcc as they // migrated from fcmp - Flags = N0->getFlags(); - SDValue SelectNode = DAG.getNode(ISD::SELECT_CC, DL, VT, Cond0, Cond1, N1, - N2, N0.getOperand(2)); - SelectNode->setFlags(Flags); - return SelectNode; + return DAG.getNode(ISD::SELECT_CC, DL, VT, Cond0, Cond1, N1, N2, + N0.getOperand(2), N0->getFlags()); } if (SDValue ABD = foldSelectToABD(Cond0, Cond1, N1, N2, CC, DL)) @@ -16738,7 +16735,8 @@ SDValue DAGCombiner::visitFREEZE(SDNode *N) { // Fold freeze(op(x, ...)) -> op(freeze(x), ...). // Try to push freeze through instructions that propagate but don't produce // poison as far as possible. If an operand of freeze follows three - // conditions 1) one-use, and 2) does not produce poison then push + // conditions 1) one-use, 2) does not produce poison, and 3) has all but one + // guaranteed-non-poison operands (or is a BUILD_VECTOR or similar) then push // the freeze through to the operands that are not guaranteed non-poison. // NOTE: we will strip poison-generating flags, so ignore them here. if (DAG.canCreateUndefOrPoison(N0, /*PoisonOnly*/ false, @@ -16746,6 +16744,18 @@ SDValue DAGCombiner::visitFREEZE(SDNode *N) { N0->getNumValues() != 1 || !N0->hasOneUse()) return SDValue(); + // TOOD: we should always allow multiple operands, however this increases the + // likelihood of infinite loops due to the ReplaceAllUsesOfValueWith call + // below causing later nodes that share frozen operands to fold again and no + // longer being able to confirm other operands are not poison due to recursion + // depth limits on isGuaranteedNotToBeUndefOrPoison. + bool AllowMultipleMaybePoisonOperands = + N0.getOpcode() == ISD::SELECT_CC || N0.getOpcode() == ISD::SETCC || + N0.getOpcode() == ISD::BUILD_VECTOR || + N0.getOpcode() == ISD::BUILD_PAIR || + N0.getOpcode() == ISD::VECTOR_SHUFFLE || + N0.getOpcode() == ISD::CONCAT_VECTORS || N0.getOpcode() == ISD::FMUL; + // Avoid turning a BUILD_VECTOR that can be recognized as "all zeros", "all // ones" or "constant" into something that depends on FrozenUndef. 
We can // instead pick undef values to keep those properties, while at the same time @@ -16772,8 +16782,16 @@ SDValue DAGCombiner::visitFREEZE(SDNode *N) { if (DAG.isGuaranteedNotToBeUndefOrPoison(Op, /*PoisonOnly*/ false, /*Depth*/ 1)) continue; - if (MaybePoisonOperands.insert(Op).second) + bool HadMaybePoisonOperands = !MaybePoisonOperands.empty(); + bool IsNewMaybePoisonOperand = MaybePoisonOperands.insert(Op).second; + if (IsNewMaybePoisonOperand) MaybePoisonOperandNumbers.push_back(OpNo); + if (!HadMaybePoisonOperands) + continue; + if (IsNewMaybePoisonOperand && !AllowMultipleMaybePoisonOperands) { + // Multiple maybe-poison ops when not allowed - bail out. + return SDValue(); + } } // NOTE: the whole op may be not guaranteed to not be undef or poison because // it could create undef or poison due to it's poison-generating flags. @@ -22727,11 +22745,7 @@ SDValue DAGCombiner::visitSTORE(SDNode *N) { SDValue DAGCombiner::visitLIFETIME_END(SDNode *N) { const auto *LifetimeEnd = cast<LifetimeSDNode>(N); - if (!LifetimeEnd->hasOffset()) - return SDValue(); - - const BaseIndexOffset LifetimeEndBase(N->getOperand(1), SDValue(), - LifetimeEnd->getOffset(), false); + const BaseIndexOffset LifetimeEndBase(N->getOperand(1), SDValue(), 0, false); // We walk up the chains to find stores. SmallVector<SDValue, 8> Chains = {N->getOperand(0)}; @@ -29418,9 +29432,8 @@ bool DAGCombiner::mayAlias(SDNode *Op0, SDNode *Op1) const { return {false /*isVolatile*/, /*isAtomic*/ false, LN->getOperand(1), - (LN->hasOffset()) ? LN->getOffset() : 0, - (LN->hasOffset()) ? LocationSize::precise(LN->getSize()) - : LocationSize::beforeOrAfterPointer(), + 0, + LocationSize::precise(LN->getSize()), (MachineMemOperand *)nullptr}; // Default. return {false /*isvolatile*/, diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp index 7266940..74172b2 100644 --- a/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp @@ -2785,19 +2785,17 @@ SDValue SelectionDAGLegalize::ExpandLegalINT_TO_FP(SDNode *Node, // In strict mode, we must avoid spurious exceptions, and therefore // must make sure to only emit a single STRICT_SINT_TO_FP. SDValue InCvt = DAG.getSelect(dl, SrcVT, SignBitTest, Or, Op0); - Fast = DAG.getNode(ISD::STRICT_SINT_TO_FP, dl, { DestVT, MVT::Other }, - { Node->getOperand(0), InCvt }); - Slow = DAG.getNode(ISD::STRICT_FADD, dl, { DestVT, MVT::Other }, - { Fast.getValue(1), Fast, Fast }); - Chain = Slow.getValue(1); // The STRICT_SINT_TO_FP inherits the exception mode from the // incoming STRICT_UINT_TO_FP node; the STRICT_FADD node can // never raise any exception. 
SDNodeFlags Flags; Flags.setNoFPExcept(Node->getFlags().hasNoFPExcept()); - Fast->setFlags(Flags); + Fast = DAG.getNode(ISD::STRICT_SINT_TO_FP, dl, {DestVT, MVT::Other}, + {Node->getOperand(0), InCvt}, Flags); Flags.setNoFPExcept(true); - Slow->setFlags(Flags); + Slow = DAG.getNode(ISD::STRICT_FADD, dl, {DestVT, MVT::Other}, + {Fast.getValue(1), Fast, Fast}, Flags); + Chain = Slow.getValue(1); } else { SDValue SignCvt = DAG.getNode(ISD::SINT_TO_FP, dl, DestVT, Or); Slow = DAG.getNode(ISD::FADD, dl, DestVT, SignCvt, SignCvt); @@ -3407,14 +3405,12 @@ bool SelectionDAGLegalize::ExpandNode(SDNode *Node) { EVT VT = Operand.getValueType(); SDValue One = DAG.getConstantFP(1.0, dl, VT); SDValue Chain = DAG.getEntryNode(); - SDValue Mul = DAG.getNode(ISD::STRICT_FMUL, dl, {VT, MVT::Other}, - {Chain, Operand, One}); - // Propagate existing flags on canonicalize, and additionally set // NoFPExcept. SDNodeFlags CanonicalizeFlags = Node->getFlags(); CanonicalizeFlags.setNoFPExcept(true); - Mul->setFlags(CanonicalizeFlags); + SDValue Mul = DAG.getNode(ISD::STRICT_FMUL, dl, {VT, MVT::Other}, + {Chain, Operand, One}, CanonicalizeFlags); Results.push_back(Mul); break; @@ -4150,15 +4146,14 @@ bool SelectionDAGLegalize::ExpandNode(SDNode *Node) { Tmp2 = Node->getOperand(1); Tmp3 = Node->getOperand(2); if (Tmp1.getOpcode() == ISD::SETCC) { - Tmp1 = DAG.getSelectCC(dl, Tmp1.getOperand(0), Tmp1.getOperand(1), - Tmp2, Tmp3, - cast<CondCodeSDNode>(Tmp1.getOperand(2))->get()); + Tmp1 = DAG.getSelectCC( + dl, Tmp1.getOperand(0), Tmp1.getOperand(1), Tmp2, Tmp3, + cast<CondCodeSDNode>(Tmp1.getOperand(2))->get(), Node->getFlags()); } else { - Tmp1 = DAG.getSelectCC(dl, Tmp1, - DAG.getConstant(0, dl, Tmp1.getValueType()), - Tmp2, Tmp3, ISD::SETNE); + Tmp1 = + DAG.getSelectCC(dl, Tmp1, DAG.getConstant(0, dl, Tmp1.getValueType()), + Tmp2, Tmp3, ISD::SETNE, Node->getFlags()); } - Tmp1->setFlags(Node->getFlags()); Results.push_back(Tmp1); break; case ISD::BR_JT: { @@ -4296,8 +4291,8 @@ bool SelectionDAGLegalize::ExpandNode(SDNode *Node) { EVT Tmp1VT = Tmp1.getValueType(); Tmp1 = DAG.getNode(ISD::SELECT_CC, dl, VT, Tmp1, Tmp2, DAG.getBoolConstant(true, dl, VT, Tmp1VT), - DAG.getBoolConstant(false, dl, VT, Tmp1VT), Tmp3); - Tmp1->setFlags(Node->getFlags()); + DAG.getBoolConstant(false, dl, VT, Tmp1VT), Tmp3, + Node->getFlags()); Results.push_back(Tmp1); break; } @@ -4335,8 +4330,8 @@ bool SelectionDAGLegalize::ExpandNode(SDNode *Node) { if (TLI.isCondCodeLegalOrCustom(InvCC, Tmp1.getSimpleValueType())) { // Use the new condition code and swap true and false Legalized = true; - Tmp1 = DAG.getSelectCC(dl, Tmp1, Tmp2, Tmp4, Tmp3, InvCC); - Tmp1->setFlags(Node->getFlags()); + Tmp1 = + DAG.getSelectCC(dl, Tmp1, Tmp2, Tmp4, Tmp3, InvCC, Node->getFlags()); } else { // If The inverse is not legal, then try to swap the arguments using // the inverse condition code. @@ -4345,8 +4340,8 @@ bool SelectionDAGLegalize::ExpandNode(SDNode *Node) { // The swapped inverse condition is legal, so swap true and false, // lhs and rhs. Legalized = true; - Tmp1 = DAG.getSelectCC(dl, Tmp2, Tmp1, Tmp4, Tmp3, SwapInvCC); - Tmp1->setFlags(Node->getFlags()); + Tmp1 = DAG.getSelectCC(dl, Tmp2, Tmp1, Tmp4, Tmp3, SwapInvCC, + Node->getFlags()); } } @@ -4365,15 +4360,14 @@ bool SelectionDAGLegalize::ExpandNode(SDNode *Node) { // If we expanded the SETCC by swapping LHS and RHS, or by inverting the // condition code, create a new SELECT_CC node. 
if (CC.getNode()) { - Tmp1 = DAG.getNode(ISD::SELECT_CC, dl, Node->getValueType(0), - Tmp1, Tmp2, Tmp3, Tmp4, CC); + Tmp1 = DAG.getNode(ISD::SELECT_CC, dl, Node->getValueType(0), Tmp1, + Tmp2, Tmp3, Tmp4, CC, Node->getFlags()); } else { Tmp2 = DAG.getConstant(0, dl, Tmp1.getValueType()); CC = DAG.getCondCode(ISD::SETNE); Tmp1 = DAG.getNode(ISD::SELECT_CC, dl, Node->getValueType(0), Tmp1, - Tmp2, Tmp3, Tmp4, CC); + Tmp2, Tmp3, Tmp4, CC, Node->getFlags()); } - Tmp1->setFlags(Node->getFlags()); } Results.push_back(Tmp1); break; diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp index f908a66..d2ecc133 100644 --- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp @@ -2087,11 +2087,10 @@ void VectorLegalizer::ExpandSETCC(SDNode *Node, // Otherwise, SETCC for the given comparison type must be completely // illegal; expand it into a SELECT_CC. EVT VT = Node->getValueType(0); - LHS = - DAG.getNode(ISD::SELECT_CC, dl, VT, LHS, RHS, - DAG.getBoolConstant(true, dl, VT, LHS.getValueType()), - DAG.getBoolConstant(false, dl, VT, LHS.getValueType()), CC); - LHS->setFlags(Node->getFlags()); + LHS = DAG.getNode(ISD::SELECT_CC, dl, VT, LHS, RHS, + DAG.getBoolConstant(true, dl, VT, LHS.getValueType()), + DAG.getBoolConstant(false, dl, VT, LHS.getValueType()), + CC, Node->getFlags()); } Results.push_back(LHS); diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp index 32c5961..1661814 100644 --- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp @@ -372,9 +372,9 @@ SDValue DAGTypeLegalizer::ScalarizeVecRes_OverflowOp(SDNode *N, SDVTList ScalarVTs = DAG.getVTList( ResVT.getVectorElementType(), OvVT.getVectorElementType()); - SDNode *ScalarNode = DAG.getNode( - N->getOpcode(), DL, ScalarVTs, ScalarLHS, ScalarRHS).getNode(); - ScalarNode->setFlags(N->getFlags()); + SDNode *ScalarNode = DAG.getNode(N->getOpcode(), DL, ScalarVTs, + {ScalarLHS, ScalarRHS}, N->getFlags()) + .getNode(); // Replace the other vector result not being explicitly scalarized here. unsigned OtherNo = 1 - ResNo; @@ -1898,7 +1898,7 @@ SDValue DAGTypeLegalizer::UnrollVectorOp_StrictFP(SDNode *N, unsigned ResNE) { NE = ResNE; //The results of each unrolled operation, including the chain. - EVT ChainVTs[] = {EltVT, MVT::Other}; + SDVTList ChainVTs = DAG.getVTList(EltVT, MVT::Other); SmallVector<SDValue, 8> Chains; unsigned i; @@ -1914,8 +1914,8 @@ SDValue DAGTypeLegalizer::UnrollVectorOp_StrictFP(SDNode *N, unsigned ResNE) { Operands[j] = Operand; } } - SDValue Scalar = DAG.getNode(N->getOpcode(), dl, ChainVTs, Operands); - Scalar.getNode()->setFlags(N->getFlags()); + SDValue Scalar = + DAG.getNode(N->getOpcode(), dl, ChainVTs, Operands, N->getFlags()); //Add in the scalar as well as its chain value to the //result vectors. 
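[Editor's sketch] These legalization hunks all make the same mechanical change: rather than building a node and mutating the result with setFlags(), the SDNodeFlags are passed into getNode()/getSelectCC() so the flags are part of node creation (and therefore visible to CSE). A toy illustration of the two call shapes, with stand-in types rather than the real SDValue/SDNodeFlags:

    #include <cassert>

    // Stand-in types; only the call shape matters here.
    struct Flags { bool NoFPExcept = false; };
    struct Node  { Flags F; };

    Node getNode(Flags F = Flags()) { return Node{F}; }

    int main() {
      // Old shape: create the node, then mutate the finished result.
      Node OldStyle = getNode();
      OldStyle.F.NoFPExcept = true;

      // New shape: flags participate in creation, so construction (and, in
      // the real SelectionDAG, the CSE lookup) sees them from the start.
      Flags F;
      F.NoFPExcept = true;
      Node NewStyle = getNode(F);

      assert(OldStyle.F.NoFPExcept == NewStyle.F.NoFPExcept);
    }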
@@ -1956,10 +1956,10 @@ void DAGTypeLegalizer::SplitVecRes_OverflowOp(SDNode *N, unsigned ResNo, unsigned Opcode = N->getOpcode(); SDVTList LoVTs = DAG.getVTList(LoResVT, LoOvVT); SDVTList HiVTs = DAG.getVTList(HiResVT, HiOvVT); - SDNode *LoNode = DAG.getNode(Opcode, dl, LoVTs, LoLHS, LoRHS).getNode(); - SDNode *HiNode = DAG.getNode(Opcode, dl, HiVTs, HiLHS, HiRHS).getNode(); - LoNode->setFlags(N->getFlags()); - HiNode->setFlags(N->getFlags()); + SDNode *LoNode = + DAG.getNode(Opcode, dl, LoVTs, {LoLHS, LoRHS}, N->getFlags()).getNode(); + SDNode *HiNode = + DAG.getNode(Opcode, dl, HiVTs, {HiLHS, HiRHS}, N->getFlags()).getNode(); Lo = SDValue(LoNode, ResNo); Hi = SDValue(HiNode, ResNo); @@ -2669,10 +2669,8 @@ void DAGTypeLegalizer::SplitVecRes_UnaryOpWithTwoResults(SDNode *N, else std::tie(Lo, Hi) = DAG.SplitVectorOperand(N, 0); - Lo = DAG.getNode(N->getOpcode(), dl, {LoVT, LoVT1}, Lo); - Hi = DAG.getNode(N->getOpcode(), dl, {HiVT, HiVT1}, Hi); - Lo->setFlags(N->getFlags()); - Hi->setFlags(N->getFlags()); + Lo = DAG.getNode(N->getOpcode(), dl, {LoVT, LoVT1}, Lo, N->getFlags()); + Hi = DAG.getNode(N->getOpcode(), dl, {HiVT, HiVT1}, Hi, N->getFlags()); SDNode *HiNode = Hi.getNode(); SDNode *LoNode = Lo.getNode(); diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp index 2458115..773ff48 100644 --- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp @@ -786,10 +786,7 @@ static void AddNodeIDCustom(FoldingSetNodeID &ID, const SDNode *N) { break; case ISD::LIFETIME_START: case ISD::LIFETIME_END: - if (cast<LifetimeSDNode>(N)->hasOffset()) { - ID.AddInteger(cast<LifetimeSDNode>(N)->getSize()); - ID.AddInteger(cast<LifetimeSDNode>(N)->getOffset()); - } + ID.AddInteger(cast<LifetimeSDNode>(N)->getSize()); break; case ISD::PSEUDO_PROBE: ID.AddInteger(cast<PseudoProbeSDNode>(N)->getGuid()); @@ -3036,7 +3033,7 @@ bool SelectionDAG::isSplatValue(SDValue V, const APInt &DemandedElts, return TLI->isSplatValueForTargetNode(V, DemandedElts, UndefElts, *this, Depth); break; -} + } // We don't support other cases than those above for scalable vectors at // the moment. @@ -9364,7 +9361,7 @@ SDValue SelectionDAG::getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl, SDValue SelectionDAG::getLifetimeNode(bool IsStart, const SDLoc &dl, SDValue Chain, int FrameIndex, - int64_t Size, int64_t Offset) { + int64_t Size) { const unsigned Opcode = IsStart ? 
ISD::LIFETIME_START : ISD::LIFETIME_END; const auto VTs = getVTList(MVT::Other); SDValue Ops[2] = { @@ -9377,13 +9374,12 @@ SDValue SelectionDAG::getLifetimeNode(bool IsStart, const SDLoc &dl, AddNodeIDNode(ID, Opcode, VTs, Ops); ID.AddInteger(FrameIndex); ID.AddInteger(Size); - ID.AddInteger(Offset); void *IP = nullptr; if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) return SDValue(E, 0); - LifetimeSDNode *N = newSDNode<LifetimeSDNode>( - Opcode, dl.getIROrder(), dl.getDebugLoc(), VTs, Size, Offset); + LifetimeSDNode *N = newSDNode<LifetimeSDNode>(Opcode, dl.getIROrder(), + dl.getDebugLoc(), VTs, Size); createOperands(N, Ops); CSEMap.InsertNode(N, IP); InsertNode(N); @@ -10563,7 +10559,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef<SDUse> Ops) { switch (Ops.size()) { case 0: return getNode(Opcode, DL, VT); - case 1: return getNode(Opcode, DL, VT, static_cast<const SDValue>(Ops[0])); + case 1: return getNode(Opcode, DL, VT, Ops[0].get()); case 2: return getNode(Opcode, DL, VT, Ops[0], Ops[1]); case 3: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Ops[2]); default: break; @@ -10699,7 +10695,16 @@ SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT, SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, ArrayRef<EVT> ResultTys, ArrayRef<SDValue> Ops) { - return getNode(Opcode, DL, getVTList(ResultTys), Ops); + SDNodeFlags Flags; + if (Inserter) + Flags = Inserter->getFlags(); + return getNode(Opcode, DL, getVTList(ResultTys), Ops, Flags); +} + +SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, + ArrayRef<EVT> ResultTys, ArrayRef<SDValue> Ops, + const SDNodeFlags Flags) { + return getNode(Opcode, DL, getVTList(ResultTys), Ops, Flags); } SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, @@ -10855,26 +10860,6 @@ SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, (Ops[2]->getAsZExtVal() == 0 || Ops[2]->getAsZExtVal() == 1) && "Invalid STRICT_FP_ROUND!"); break; -#if 0 - // FIXME: figure out how to safely handle things like - // int foo(int x) { return 1 << (x & 255); } - // int bar() { return foo(256); } - case ISD::SRA_PARTS: - case ISD::SRL_PARTS: - case ISD::SHL_PARTS: - if (N3.getOpcode() == ISD::SIGN_EXTEND_INREG && - cast<VTSDNode>(N3.getOperand(1))->getVT() != MVT::i1) - return getNode(Opcode, DL, VT, N1, N2, N3.getOperand(0)); - else if (N3.getOpcode() == ISD::AND) - if (ConstantSDNode *AndRHS = dyn_cast<ConstantSDNode>(N3.getOperand(1))) { - // If the and is only masking out bits that cannot effect the shift, - // eliminate the and. - unsigned NumBits = VT.getScalarSizeInBits()*2; - if ((AndRHS->getValue() & (NumBits-1)) == NumBits-1) - return getNode(Opcode, DL, VT, N1, N2, N3.getOperand(0)); - } - break; -#endif } // Memoize the node unless it returns a glue result. 
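[Editor's sketch] With the SelectionDAG changes above, LIFETIME_START/LIFETIME_END nodes no longer carry a separate Offset: a marker always covers bytes [0, Size) of its frame index, so hasOffset()/getOffset() and the -1 sentinel disappear from LifetimeSDNode and its FoldingSet profile. A toy model of the simplified node (a stand-in struct, not the real class):

    #include <cassert>
    #include <cstdint>

    // Toy model of the simplified LifetimeSDNode: the marked range is
    // implicitly [0, Size) of the frame index; no Offset field remains.
    struct LifetimeNodeModel {
      int FrameIndex;
      int64_t Size;
    };

    int64_t coveredBytesEnd(const LifetimeNodeModel &N) { return N.Size; }

    int main() {
      LifetimeNodeModel L{/*FrameIndex=*/2, /*Size=*/16};
      assert(coveredBytesEnd(L) == 16); // previously Offset + Size
    }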
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGAddressAnalysis.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGAddressAnalysis.cpp index da92aaa..8f08046 100644 --- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGAddressAnalysis.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGAddressAnalysis.cpp @@ -303,10 +303,7 @@ BaseIndexOffset BaseIndexOffset::match(const SDNode *N, if (const auto *LS0 = dyn_cast<LSBaseSDNode>(N)) return matchLSNode(LS0, DAG); if (const auto *LN = dyn_cast<LifetimeSDNode>(N)) { - if (LN->hasOffset()) - return BaseIndexOffset(LN->getOperand(1), SDValue(), LN->getOffset(), - false); - return BaseIndexOffset(LN->getOperand(1), SDValue(), false); + return BaseIndexOffset(LN->getOperand(1), SDValue(), 0, false); } return BaseIndexOffset(); } diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp index 01e5312..1636465 100644 --- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp @@ -7596,32 +7596,17 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, const int64_t ObjectSize = cast<ConstantInt>(I.getArgOperand(0))->getSExtValue(); - Value *const ObjectPtr = I.getArgOperand(1); - SmallVector<const Value *, 4> Allocas; - getUnderlyingObjects(ObjectPtr, Allocas); + const AllocaInst *LifetimeObject = cast<AllocaInst>(I.getArgOperand(1)); - for (const Value *Alloca : Allocas) { - const AllocaInst *LifetimeObject = dyn_cast_or_null<AllocaInst>(Alloca); - - // Could not find an Alloca. - if (!LifetimeObject) - continue; - - // First check that the Alloca is static, otherwise it won't have a - // valid frame index. - auto SI = FuncInfo.StaticAllocaMap.find(LifetimeObject); - if (SI == FuncInfo.StaticAllocaMap.end()) - return; + // First check that the Alloca is static, otherwise it won't have a + // valid frame index. + auto SI = FuncInfo.StaticAllocaMap.find(LifetimeObject); + if (SI == FuncInfo.StaticAllocaMap.end()) + return; - const int FrameIndex = SI->second; - int64_t Offset; - if (GetPointerBaseWithConstantOffset( - ObjectPtr, Offset, DAG.getDataLayout()) != LifetimeObject) - Offset = -1; // Cannot determine offset from alloca to lifetime object. 
- Res = DAG.getLifetimeNode(IsStart, sdl, getRoot(), FrameIndex, ObjectSize, - Offset); - DAG.setRoot(Res); - } + const int FrameIndex = SI->second; + Res = DAG.getLifetimeNode(IsStart, sdl, getRoot(), FrameIndex, ObjectSize); + DAG.setRoot(Res); return; } case Intrinsic::pseudoprobe: { diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp index 7fc1558..9474587 100644 --- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp @@ -947,8 +947,7 @@ void SDNode::print_details(raw_ostream &OS, const SelectionDAG *G) const { << ASC->getDestAddressSpace() << ']'; } else if (const LifetimeSDNode *LN = dyn_cast<LifetimeSDNode>(this)) { - if (LN->hasOffset()) - OS << "<" << LN->getOffset() << " to " << LN->getOffset() + LN->getSize() << ">"; + OS << "<0 to " << LN->getSize() << ">"; } else if (const auto *AA = dyn_cast<AssertAlignSDNode>(this)) { OS << '<' << AA->getAlign().value() << '>'; } diff --git a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp index 37fddcf..1764910 100644 --- a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp @@ -8129,7 +8129,7 @@ static bool isNonZeroModBitWidthOrUndef(SDValue Z, unsigned BW) { return ISD::matchUnaryPredicate( Z, [=](ConstantSDNode *C) { return !C || C->getAPIntValue().urem(BW) != 0; }, - /*AllowUndef=*/true, /*AllowTruncation=*/true); + /*AllowUndefs=*/true, /*AllowTruncation=*/true); } static SDValue expandVPFunnelShift(SDNode *Node, SelectionDAG &DAG) { @@ -8634,9 +8634,8 @@ TargetLowering::createSelectForFMINNUM_FMAXNUM(SDNode *Node, return SDValue(); SDValue Op1 = Node->getOperand(0); SDValue Op2 = Node->getOperand(1); - SDValue SelCC = DAG.getSelectCC(SDLoc(Node), Op1, Op2, Op1, Op2, Pred); - SelCC->setFlags(Node->getFlags()); - return SelCC; + return DAG.getSelectCC(SDLoc(Node), Op1, Op2, Op1, Op2, Pred, + Node->getFlags()); } return SDValue(); @@ -11995,8 +11994,7 @@ SDValue TargetLowering::expandVECTOR_COMPRESS(SDNode *Node, // Get the mask value and add it to the current output position. This // either increments by 1 if MaskI is true or adds 0 otherwise. // Freeze in case we have poison/undef mask entries. - SDValue MaskI = - DAG.getFreeze(DAG.getExtractVectorElt(DL, MaskScalarVT, Mask, I)); + SDValue MaskI = DAG.getExtractVectorElt(DL, MaskScalarVT, Mask, I); MaskI = DAG.getFreeze(MaskI); MaskI = DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, MaskI); MaskI = DAG.getNode(ISD::ZERO_EXTEND, DL, PositionVT, MaskI); diff --git a/llvm/lib/IR/AsmWriter.cpp b/llvm/lib/IR/AsmWriter.cpp index 3e40915..e5a4e1e 100644 --- a/llvm/lib/IR/AsmWriter.cpp +++ b/llvm/lib/IR/AsmWriter.cpp @@ -2401,8 +2401,9 @@ static void writeDIFile(raw_ostream &Out, const DIFile *N, AsmWriterContext &) { // Print all values for checksum together, or not at all. 
if (N->getChecksum()) Printer.printChecksum(*N->getChecksum()); - Printer.printString("source", N->getSource().value_or(StringRef()), - /* ShouldSkipEmpty */ true); + if (N->getSource()) + Printer.printString("source", *N->getSource(), + /* ShouldSkipEmpty */ false); Out << ")"; } diff --git a/llvm/lib/IR/PassInstrumentation.cpp b/llvm/lib/IR/PassInstrumentation.cpp index 94ad124..70bbe8f 100644 --- a/llvm/lib/IR/PassInstrumentation.cpp +++ b/llvm/lib/IR/PassInstrumentation.cpp @@ -23,6 +23,7 @@ template struct LLVM_EXPORT_TEMPLATE Any::TypeId<const Loop *>; void PassInstrumentationCallbacks::addClassToPassName(StringRef ClassName, StringRef PassName) { + assert(!PassName.empty() && "PassName can't be empty!"); ClassToPassName.try_emplace(ClassName, PassName.str()); } @@ -33,7 +34,10 @@ PassInstrumentationCallbacks::getPassNameForClassName(StringRef ClassName) { Fn(); ClassToPassNameCallbacks.clear(); } - return ClassToPassName[ClassName]; + auto PassNameIter = ClassToPassName.find(ClassName); + if (PassNameIter != ClassToPassName.end()) + return PassNameIter->second; + return {}; } AnalysisKey PassInstrumentationAnalysis::Key; diff --git a/llvm/lib/MC/MCDwarf.cpp b/llvm/lib/MC/MCDwarf.cpp index b1dced7..e7c0d37 100644 --- a/llvm/lib/MC/MCDwarf.cpp +++ b/llvm/lib/MC/MCDwarf.cpp @@ -447,10 +447,17 @@ static void emitOneV5FileEntry(MCStreamer *MCOS, const MCDwarfFile &DwarfFile, StringRef(reinterpret_cast<const char *>(Cksum.data()), Cksum.size())); } if (HasAnySource) { + // From https://dwarfstd.org/issues/180201.1.html + // * The value is an empty null-terminated string if no source is available + StringRef Source = DwarfFile.Source.value_or(StringRef()); + // * If the source is available but is an empty file then the value is a + // null-terminated single "\n". + if (DwarfFile.Source && DwarfFile.Source->empty()) + Source = "\n"; if (LineStr) - LineStr->emitRef(MCOS, DwarfFile.Source.value_or(StringRef())); + LineStr->emitRef(MCOS, Source); else { - MCOS->emitBytes(DwarfFile.Source.value_or(StringRef())); // Source and... + MCOS->emitBytes(Source); // Source and... MCOS->emitBytes(StringRef("\0", 1)); // its null terminator. } } diff --git a/llvm/lib/Object/ELFObjectFile.cpp b/llvm/lib/Object/ELFObjectFile.cpp index 5597d7d..0919c6a 100644 --- a/llvm/lib/Object/ELFObjectFile.cpp +++ b/llvm/lib/Object/ELFObjectFile.cpp @@ -620,7 +620,9 @@ StringRef ELFObjectFileBase::getAMDGPUCPUName() const { StringRef ELFObjectFileBase::getNVPTXCPUName() const { assert(getEMachine() == ELF::EM_CUDA); - unsigned SM = getPlatformFlags() & ELF::EF_CUDA_SM; + unsigned SM = getEIdentABIVersion() == ELF::ELFABIVERSION_CUDA_V1 + ? getPlatformFlags() & ELF::EF_CUDA_SM + : getPlatformFlags() & ELF::EF_CUDA_SM_MASK; switch (SM) { // Fermi architecture. @@ -679,7 +681,18 @@ StringRef ELFObjectFileBase::getNVPTXCPUName() const { // Hopper architecture. case ELF::EF_CUDA_SM90: - return getPlatformFlags() & ELF::EF_CUDA_ACCELERATORS ? "sm_90a" : "sm_90"; + return getPlatformFlags() & ELF::EF_CUDA_ACCELERATORS_V1 ? "sm_90a" + : "sm_90"; + + // Blackwell architecture. + case ELF::EF_CUDA_SM100: + return getPlatformFlags() & ELF::EF_CUDA_ACCELERATORS ? "sm_100a" + : "sm_100"; + + // Rubin architecture. + case ELF::EF_CUDA_SM120: + return getPlatformFlags() & ELF::EF_CUDA_ACCELERATORS ? 
"sm_120a" + : "sm_120"; default: llvm_unreachable("Unknown EF_CUDA_SM value"); } diff --git a/llvm/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp b/llvm/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp index 12fc976..201bfe0 100644 --- a/llvm/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp +++ b/llvm/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp @@ -1205,32 +1205,36 @@ bool AArch64ExpandPseudo::expandMI(MachineBasicBlock &MBB, Register DstReg = MI.getOperand(0).getReg(); if (DstReg == MI.getOperand(3).getReg()) { // Expand to BIT - BuildMI(MBB, MBBI, MI.getDebugLoc(), - TII->get(Opcode == AArch64::BSPv8i8 ? AArch64::BITv8i8 - : AArch64::BITv16i8)) - .add(MI.getOperand(0)) - .add(MI.getOperand(3)) - .add(MI.getOperand(2)) - .add(MI.getOperand(1)); + auto I = BuildMI(MBB, MBBI, MI.getDebugLoc(), + TII->get(Opcode == AArch64::BSPv8i8 ? AArch64::BITv8i8 + : AArch64::BITv16i8)) + .add(MI.getOperand(0)) + .add(MI.getOperand(3)) + .add(MI.getOperand(2)) + .add(MI.getOperand(1)); + transferImpOps(MI, I, I); } else if (DstReg == MI.getOperand(2).getReg()) { // Expand to BIF - BuildMI(MBB, MBBI, MI.getDebugLoc(), - TII->get(Opcode == AArch64::BSPv8i8 ? AArch64::BIFv8i8 - : AArch64::BIFv16i8)) - .add(MI.getOperand(0)) - .add(MI.getOperand(2)) - .add(MI.getOperand(3)) - .add(MI.getOperand(1)); + auto I = BuildMI(MBB, MBBI, MI.getDebugLoc(), + TII->get(Opcode == AArch64::BSPv8i8 ? AArch64::BIFv8i8 + : AArch64::BIFv16i8)) + .add(MI.getOperand(0)) + .add(MI.getOperand(2)) + .add(MI.getOperand(3)) + .add(MI.getOperand(1)); + transferImpOps(MI, I, I); } else { // Expand to BSL, use additional move if required if (DstReg == MI.getOperand(1).getReg()) { - BuildMI(MBB, MBBI, MI.getDebugLoc(), - TII->get(Opcode == AArch64::BSPv8i8 ? AArch64::BSLv8i8 - : AArch64::BSLv16i8)) - .add(MI.getOperand(0)) - .add(MI.getOperand(1)) - .add(MI.getOperand(2)) - .add(MI.getOperand(3)); + auto I = + BuildMI(MBB, MBBI, MI.getDebugLoc(), + TII->get(Opcode == AArch64::BSPv8i8 ? AArch64::BSLv8i8 + : AArch64::BSLv16i8)) + .add(MI.getOperand(0)) + .add(MI.getOperand(1)) + .add(MI.getOperand(2)) + .add(MI.getOperand(3)); + transferImpOps(MI, I, I); } else { BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(Opcode == AArch64::BSPv8i8 ? AArch64::ORRv8i8 @@ -1240,15 +1244,17 @@ bool AArch64ExpandPseudo::expandMI(MachineBasicBlock &MBB, getRenamableRegState(MI.getOperand(0).isRenamable())) .add(MI.getOperand(1)) .add(MI.getOperand(1)); - BuildMI(MBB, MBBI, MI.getDebugLoc(), - TII->get(Opcode == AArch64::BSPv8i8 ? AArch64::BSLv8i8 - : AArch64::BSLv16i8)) - .add(MI.getOperand(0)) - .addReg(DstReg, - RegState::Kill | - getRenamableRegState(MI.getOperand(0).isRenamable())) - .add(MI.getOperand(2)) - .add(MI.getOperand(3)); + auto I2 = + BuildMI(MBB, MBBI, MI.getDebugLoc(), + TII->get(Opcode == AArch64::BSPv8i8 ? AArch64::BSLv8i8 + : AArch64::BSLv16i8)) + .add(MI.getOperand(0)) + .addReg(DstReg, + RegState::Kill | getRenamableRegState( + MI.getOperand(0).isRenamable())) + .add(MI.getOperand(2)) + .add(MI.getOperand(3)); + transferImpOps(MI, I2, I2); } } MI.eraseFromParent(); diff --git a/llvm/lib/Target/AArch64/AArch64MIPeepholeOpt.cpp b/llvm/lib/Target/AArch64/AArch64MIPeepholeOpt.cpp index 0ddd17c..abcd550 100644 --- a/llvm/lib/Target/AArch64/AArch64MIPeepholeOpt.cpp +++ b/llvm/lib/Target/AArch64/AArch64MIPeepholeOpt.cpp @@ -8,8 +8,8 @@ // // This pass performs below peephole optimizations on MIR level. // -// 1. MOVi32imm + ANDWrr ==> ANDWri + ANDWri -// MOVi64imm + ANDXrr ==> ANDXri + ANDXri +// 1. 
MOVi32imm + ANDS?Wrr ==> ANDWri + ANDS?Wri +// MOVi64imm + ANDS?Xrr ==> ANDXri + ANDS?Xri // // 2. MOVi32imm + ADDWrr ==> ADDWRi + ADDWRi // MOVi64imm + ADDXrr ==> ANDXri + ANDXri @@ -126,7 +126,7 @@ struct AArch64MIPeepholeOpt : public MachineFunctionPass { bool visitADDSSUBS(OpcodePair PosOpcs, OpcodePair NegOpcs, MachineInstr &MI); template <typename T> - bool visitAND(unsigned Opc, MachineInstr &MI); + bool visitAND(unsigned Opc, MachineInstr &MI, unsigned OtherOpc = 0); bool visitORR(MachineInstr &MI); bool visitCSEL(MachineInstr &MI); bool visitINSERT(MachineInstr &MI); @@ -194,12 +194,12 @@ static bool splitBitmaskImm(T Imm, unsigned RegSize, T &Imm1Enc, T &Imm2Enc) { } template <typename T> -bool AArch64MIPeepholeOpt::visitAND( - unsigned Opc, MachineInstr &MI) { +bool AArch64MIPeepholeOpt::visitAND(unsigned Opc, MachineInstr &MI, + unsigned OtherOpc) { // Try below transformation. // - // MOVi32imm + ANDWrr ==> ANDWri + ANDWri - // MOVi64imm + ANDXrr ==> ANDXri + ANDXri + // MOVi32imm + ANDS?Wrr ==> ANDWri + ANDS?Wri + // MOVi64imm + ANDS?Xrr ==> ANDXri + ANDS?Xri // // The mov pseudo instruction could be expanded to multiple mov instructions // later. Let's try to split the constant operand of mov instruction into two @@ -208,10 +208,10 @@ bool AArch64MIPeepholeOpt::visitAND( return splitTwoPartImm<T>( MI, - [Opc](T Imm, unsigned RegSize, T &Imm0, - T &Imm1) -> std::optional<OpcodePair> { + [Opc, OtherOpc](T Imm, unsigned RegSize, T &Imm0, + T &Imm1) -> std::optional<OpcodePair> { if (splitBitmaskImm(Imm, RegSize, Imm0, Imm1)) - return std::make_pair(Opc, Opc); + return std::make_pair(Opc, !OtherOpc ? Opc : OtherOpc); return std::nullopt; }, [&TII = TII](MachineInstr &MI, OpcodePair Opcode, unsigned Imm0, @@ -864,6 +864,12 @@ bool AArch64MIPeepholeOpt::runOnMachineFunction(MachineFunction &MF) { case AArch64::ANDXrr: Changed |= visitAND<uint64_t>(AArch64::ANDXri, MI); break; + case AArch64::ANDSWrr: + Changed |= visitAND<uint32_t>(AArch64::ANDWri, MI, AArch64::ANDSWri); + break; + case AArch64::ANDSXrr: + Changed |= visitAND<uint64_t>(AArch64::ANDXri, MI, AArch64::ANDSXri); + break; case AArch64::ORRWrs: Changed |= visitORR(MI); break; diff --git a/llvm/lib/Target/AArch64/AArch64Subtarget.cpp b/llvm/lib/Target/AArch64/AArch64Subtarget.cpp index 2409cc8..0f4f012 100644 --- a/llvm/lib/Target/AArch64/AArch64Subtarget.cpp +++ b/llvm/lib/Target/AArch64/AArch64Subtarget.cpp @@ -534,7 +534,7 @@ unsigned AArch64Subtarget::classifyGlobalFunctionReference( } void AArch64Subtarget::overrideSchedPolicy(MachineSchedPolicy &Policy, - unsigned NumRegionInstrs) const { + const SchedRegion &Region) const { // LNT run (at least on Cyclone) showed reasonably significant gains for // bi-directional scheduling. 253.perlbmk. 
Policy.OnlyTopDown = false; diff --git a/llvm/lib/Target/AArch64/AArch64Subtarget.h b/llvm/lib/Target/AArch64/AArch64Subtarget.h index 154db3c..061ed61 100644 --- a/llvm/lib/Target/AArch64/AArch64Subtarget.h +++ b/llvm/lib/Target/AArch64/AArch64Subtarget.h @@ -343,7 +343,8 @@ public: } void overrideSchedPolicy(MachineSchedPolicy &Policy, - unsigned NumRegionInstrs) const override; + const SchedRegion &Region) const override; + void adjustSchedDependency(SUnit *Def, int DefOpIdx, SUnit *Use, int UseOpIdx, SDep &Dep, const TargetSchedModel *SchedModel) const override; diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp index 00c7f0e..3412bb5 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp @@ -1872,6 +1872,23 @@ static SDValue matchZExtFromI32(SDValue Op) { return (ExtSrc.getValueType() == MVT::i32) ? ExtSrc : SDValue(); } +// If this matches *_extend i32:x, return x. +// Otherwise, if the value is already i32, return x itself. +static SDValue matchExtFromI32orI32(SDValue Op, bool IsSigned, + const SelectionDAG *DAG) { + if (Op.getValueType() == MVT::i32) + return Op; + + if (Op.getOpcode() != (IsSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND) && + Op.getOpcode() != ISD::ANY_EXTEND && + !(DAG->SignBitIsZero(Op) && + Op.getOpcode() == (IsSigned ? ISD::ZERO_EXTEND : ISD::SIGN_EXTEND))) + return SDValue(); + + SDValue ExtSrc = Op.getOperand(0); + return (ExtSrc.getValueType() == MVT::i32) ? ExtSrc : SDValue(); +} + // Match (64-bit SGPR base) + (zext vgpr offset) + sext(imm offset) bool AMDGPUDAGToDAGISel::SelectGlobalSAddr(SDNode *N, SDValue Addr, @@ -2159,17 +2176,59 @@ bool AMDGPUDAGToDAGISel::isSOffsetLegalWithImmOffset(SDValue *SOffset, return true; } +// Given \p Offset and load node \p N, check if \p Offset is a multiple of +// the load byte size. If it is, update \p Offset to a pre-scaled value and +// return true. +bool AMDGPUDAGToDAGISel::SelectScaleOffset(SDNode *N, SDValue &Offset, + bool IsSigned) const { + bool ScaleOffset = false; + if (!Subtarget->hasScaleOffset() || !Offset) + return false; + + unsigned Size = + (unsigned)cast<MemSDNode>(N)->getMemoryVT().getFixedSizeInBits() / 8; + + SDValue Off = Offset; + if (SDValue Ext = matchExtFromI32orI32(Offset, IsSigned, CurDAG)) + Off = Ext; + + if (isPowerOf2_32(Size) && Off.getOpcode() == ISD::SHL) { + if (auto *C = dyn_cast<ConstantSDNode>(Off.getOperand(1))) + ScaleOffset = C->getZExtValue() == Log2_32(Size); + } else if (Offset.getOpcode() == ISD::MUL || + (IsSigned && Offset.getOpcode() == AMDGPUISD::MUL_I24) || + Offset.getOpcode() == AMDGPUISD::MUL_U24 || + (Offset.isMachineOpcode() && + Offset.getMachineOpcode() == + (IsSigned ? AMDGPU::S_MUL_I64_I32_PSEUDO + : AMDGPU::S_MUL_U64_U32_PSEUDO))) { + if (auto *C = dyn_cast<ConstantSDNode>(Offset.getOperand(1))) + ScaleOffset = C->getZExtValue() == Size; + } + + if (ScaleOffset) + Offset = Off.getOperand(0); + + return ScaleOffset; +} + // Match an immediate (if Offset is not null) or an SGPR (if SOffset is // not null) offset. If Imm32Only is true, match only 32-bit immediate // offsets available on CI. 
-bool AMDGPUDAGToDAGISel::SelectSMRDOffset(SDValue ByteOffsetNode, +bool AMDGPUDAGToDAGISel::SelectSMRDOffset(SDNode *N, SDValue ByteOffsetNode, SDValue *SOffset, SDValue *Offset, bool Imm32Only, bool IsBuffer, - bool HasSOffset, - int64_t ImmOffset) const { + bool HasSOffset, int64_t ImmOffset, + bool *ScaleOffset) const { assert((!SOffset || !Offset) && "Cannot match both soffset and offset at the same time!"); + if (ScaleOffset) { + assert(N && SOffset); + + *ScaleOffset = SelectScaleOffset(N, ByteOffsetNode, false /* IsSigned */); + } + ConstantSDNode *C = dyn_cast<ConstantSDNode>(ByteOffsetNode); if (!C) { if (!SOffset) @@ -2254,24 +2313,25 @@ SDValue AMDGPUDAGToDAGISel::Expand32BitAddress(SDValue Addr) const { // Match a base and an immediate (if Offset is not null) or an SGPR (if // SOffset is not null) or an immediate+SGPR offset. If Imm32Only is // true, match only 32-bit immediate offsets available on CI. -bool AMDGPUDAGToDAGISel::SelectSMRDBaseOffset(SDValue Addr, SDValue &SBase, - SDValue *SOffset, SDValue *Offset, - bool Imm32Only, bool IsBuffer, - bool HasSOffset, - int64_t ImmOffset) const { +bool AMDGPUDAGToDAGISel::SelectSMRDBaseOffset(SDNode *N, SDValue Addr, + SDValue &SBase, SDValue *SOffset, + SDValue *Offset, bool Imm32Only, + bool IsBuffer, bool HasSOffset, + int64_t ImmOffset, + bool *ScaleOffset) const { if (SOffset && Offset) { assert(!Imm32Only && !IsBuffer); SDValue B; - if (!SelectSMRDBaseOffset(Addr, B, nullptr, Offset, false, false, true)) + if (!SelectSMRDBaseOffset(N, Addr, B, nullptr, Offset, false, false, true)) return false; int64_t ImmOff = 0; if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(*Offset)) ImmOff = C->getSExtValue(); - return SelectSMRDBaseOffset(B, SBase, SOffset, nullptr, false, false, true, - ImmOff); + return SelectSMRDBaseOffset(N, B, SBase, SOffset, nullptr, false, false, + true, ImmOff, ScaleOffset); } // A 32-bit (address + offset) should not cause unsigned 32-bit integer @@ -2291,23 +2351,25 @@ bool AMDGPUDAGToDAGISel::SelectSMRDBaseOffset(SDValue Addr, SDValue &SBase, if (!N0 || !N1) return false; - if (SelectSMRDOffset(N1, SOffset, Offset, Imm32Only, IsBuffer, HasSOffset, - ImmOffset)) { + if (SelectSMRDOffset(N, N1, SOffset, Offset, Imm32Only, IsBuffer, HasSOffset, + ImmOffset, ScaleOffset)) { SBase = N0; return true; } - if (SelectSMRDOffset(N0, SOffset, Offset, Imm32Only, IsBuffer, HasSOffset, - ImmOffset)) { + if (SelectSMRDOffset(N, N0, SOffset, Offset, Imm32Only, IsBuffer, HasSOffset, + ImmOffset, ScaleOffset)) { SBase = N1; return true; } return false; } -bool AMDGPUDAGToDAGISel::SelectSMRD(SDValue Addr, SDValue &SBase, +bool AMDGPUDAGToDAGISel::SelectSMRD(SDNode *N, SDValue Addr, SDValue &SBase, SDValue *SOffset, SDValue *Offset, - bool Imm32Only) const { - if (SelectSMRDBaseOffset(Addr, SBase, SOffset, Offset, Imm32Only)) { + bool Imm32Only, bool *ScaleOffset) const { + if (SelectSMRDBaseOffset(N, Addr, SBase, SOffset, Offset, Imm32Only, + /* IsBuffer */ false, /* HasSOffset */ false, + /* ImmOffset */ 0, ScaleOffset)) { SBase = Expand32BitAddress(SBase); return true; } @@ -2323,36 +2385,51 @@ bool AMDGPUDAGToDAGISel::SelectSMRD(SDValue Addr, SDValue &SBase, bool AMDGPUDAGToDAGISel::SelectSMRDImm(SDValue Addr, SDValue &SBase, SDValue &Offset) const { - return SelectSMRD(Addr, SBase, /* SOffset */ nullptr, &Offset); + return SelectSMRD(/* N */ nullptr, Addr, SBase, /* SOffset */ nullptr, + &Offset); } bool AMDGPUDAGToDAGISel::SelectSMRDImm32(SDValue Addr, SDValue &SBase, SDValue &Offset) const { 
assert(Subtarget->getGeneration() == AMDGPUSubtarget::SEA_ISLANDS); - return SelectSMRD(Addr, SBase, /* SOffset */ nullptr, &Offset, - /* Imm32Only */ true); + return SelectSMRD(/* N */ nullptr, Addr, SBase, /* SOffset */ nullptr, + &Offset, /* Imm32Only */ true); } -bool AMDGPUDAGToDAGISel::SelectSMRDSgpr(SDValue Addr, SDValue &SBase, - SDValue &SOffset) const { - return SelectSMRD(Addr, SBase, &SOffset, /* Offset */ nullptr); +bool AMDGPUDAGToDAGISel::SelectSMRDSgpr(SDNode *N, SDValue Addr, SDValue &SBase, + SDValue &SOffset, SDValue &CPol) const { + bool ScaleOffset; + if (!SelectSMRD(N, Addr, SBase, &SOffset, /* Offset */ nullptr, + /* Imm32Only */ false, &ScaleOffset)) + return false; + + CPol = CurDAG->getTargetConstant(ScaleOffset ? AMDGPU::CPol::SCAL : 0, + SDLoc(N), MVT::i32); + return true; } -bool AMDGPUDAGToDAGISel::SelectSMRDSgprImm(SDValue Addr, SDValue &SBase, - SDValue &SOffset, - SDValue &Offset) const { - return SelectSMRD(Addr, SBase, &SOffset, &Offset); +bool AMDGPUDAGToDAGISel::SelectSMRDSgprImm(SDNode *N, SDValue Addr, + SDValue &SBase, SDValue &SOffset, + SDValue &Offset, + SDValue &CPol) const { + bool ScaleOffset; + if (!SelectSMRD(N, Addr, SBase, &SOffset, &Offset, false, &ScaleOffset)) + return false; + + CPol = CurDAG->getTargetConstant(ScaleOffset ? AMDGPU::CPol::SCAL : 0, + SDLoc(N), MVT::i32); + return true; } bool AMDGPUDAGToDAGISel::SelectSMRDBufferImm(SDValue N, SDValue &Offset) const { - return SelectSMRDOffset(N, /* SOffset */ nullptr, &Offset, + return SelectSMRDOffset(/* N */ nullptr, N, /* SOffset */ nullptr, &Offset, /* Imm32Only */ false, /* IsBuffer */ true); } bool AMDGPUDAGToDAGISel::SelectSMRDBufferImm32(SDValue N, SDValue &Offset) const { assert(Subtarget->getGeneration() == AMDGPUSubtarget::SEA_ISLANDS); - return SelectSMRDOffset(N, /* SOffset */ nullptr, &Offset, + return SelectSMRDOffset(/* N */ nullptr, N, /* SOffset */ nullptr, &Offset, /* Imm32Only */ true, /* IsBuffer */ true); } @@ -2361,9 +2438,9 @@ bool AMDGPUDAGToDAGISel::SelectSMRDBufferSgprImm(SDValue N, SDValue &SOffset, // Match the (soffset + offset) pair as a 32-bit register base and // an immediate offset. 
return N.getValueType() == MVT::i32 && - SelectSMRDBaseOffset(N, /* SBase */ SOffset, /* SOffset*/ nullptr, - &Offset, /* Imm32Only */ false, - /* IsBuffer */ true); + SelectSMRDBaseOffset(/* N */ nullptr, N, /* SBase */ SOffset, + /* SOffset*/ nullptr, &Offset, + /* Imm32Only */ false, /* IsBuffer */ true); } bool AMDGPUDAGToDAGISel::SelectMOVRELOffset(SDValue Index, diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.h b/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.h index acbab3d..f7c7b3e 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.h +++ b/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.h @@ -176,22 +176,28 @@ private: bool SelectScratchSVAddr(SDNode *N, SDValue Addr, SDValue &VAddr, SDValue &SAddr, SDValue &Offset) const; - bool SelectSMRDOffset(SDValue ByteOffsetNode, SDValue *SOffset, + bool SelectSMRDOffset(SDNode *N, SDValue ByteOffsetNode, SDValue *SOffset, SDValue *Offset, bool Imm32Only = false, bool IsBuffer = false, bool HasSOffset = false, - int64_t ImmOffset = 0) const; + int64_t ImmOffset = 0, + bool *ScaleOffset = nullptr) const; SDValue Expand32BitAddress(SDValue Addr) const; - bool SelectSMRDBaseOffset(SDValue Addr, SDValue &SBase, SDValue *SOffset, - SDValue *Offset, bool Imm32Only = false, - bool IsBuffer = false, bool HasSOffset = false, - int64_t ImmOffset = 0) const; - bool SelectSMRD(SDValue Addr, SDValue &SBase, SDValue *SOffset, - SDValue *Offset, bool Imm32Only = false) const; + bool SelectSMRDBaseOffset(SDNode *N, SDValue Addr, SDValue &SBase, + SDValue *SOffset, SDValue *Offset, + bool Imm32Only = false, bool IsBuffer = false, + bool HasSOffset = false, int64_t ImmOffset = 0, + bool *ScaleOffset = nullptr) const; + bool SelectSMRD(SDNode *N, SDValue Addr, SDValue &SBase, SDValue *SOffset, + SDValue *Offset, bool Imm32Only = false, + bool *ScaleOffset = nullptr) const; bool SelectSMRDImm(SDValue Addr, SDValue &SBase, SDValue &Offset) const; bool SelectSMRDImm32(SDValue Addr, SDValue &SBase, SDValue &Offset) const; - bool SelectSMRDSgpr(SDValue Addr, SDValue &SBase, SDValue &SOffset) const; - bool SelectSMRDSgprImm(SDValue Addr, SDValue &SBase, SDValue &SOffset, - SDValue &Offset) const; + bool SelectScaleOffset(SDNode *N, SDValue &Offset, bool IsSigned) const; + bool SelectSMRDSgpr(SDNode *N, SDValue Addr, SDValue &SBase, SDValue &SOffset, + SDValue &CPol) const; + bool SelectSMRDSgprImm(SDNode *N, SDValue Addr, SDValue &SBase, + SDValue &SOffset, SDValue &Offset, + SDValue &CPol) const; bool SelectSMRDBufferImm(SDValue N, SDValue &Offset) const; bool SelectSMRDBufferImm32(SDValue N, SDValue &Offset) const; bool SelectSMRDBufferSgprImm(SDValue N, SDValue &SOffset, diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp index 8975486..d2e718c 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp @@ -3494,25 +3494,74 @@ bool AMDGPUInstructionSelector::selectBufferLoadLds(MachineInstr &MI) const { } /// Match a zero extend from a 32-bit value to 64-bits. -static Register matchZeroExtendFromS32(MachineRegisterInfo &MRI, Register Reg) { +Register AMDGPUInstructionSelector::matchZeroExtendFromS32(Register Reg) const { Register ZExtSrc; - if (mi_match(Reg, MRI, m_GZExt(m_Reg(ZExtSrc)))) - return MRI.getType(ZExtSrc) == LLT::scalar(32) ? ZExtSrc : Register(); + if (mi_match(Reg, *MRI, m_GZExt(m_Reg(ZExtSrc)))) + return MRI->getType(ZExtSrc) == LLT::scalar(32) ? 
ZExtSrc : Register(); // Match legalized form %zext = G_MERGE_VALUES (s32 %x), (s32 0) - const MachineInstr *Def = getDefIgnoringCopies(Reg, MRI); + const MachineInstr *Def = getDefIgnoringCopies(Reg, *MRI); if (Def->getOpcode() != AMDGPU::G_MERGE_VALUES) return Register(); assert(Def->getNumOperands() == 3 && - MRI.getType(Def->getOperand(0).getReg()) == LLT::scalar(64)); - if (mi_match(Def->getOperand(2).getReg(), MRI, m_ZeroInt())) { + MRI->getType(Def->getOperand(0).getReg()) == LLT::scalar(64)); + if (mi_match(Def->getOperand(2).getReg(), *MRI, m_ZeroInt())) { return Def->getOperand(1).getReg(); } return Register(); } +/// Match a sign extend from a 32-bit value to 64-bits. +Register AMDGPUInstructionSelector::matchSignExtendFromS32(Register Reg) const { + Register SExtSrc; + if (mi_match(Reg, *MRI, m_GSExt(m_Reg(SExtSrc)))) + return MRI->getType(SExtSrc) == LLT::scalar(32) ? SExtSrc : Register(); + + // Match legalized form %sext = G_MERGE_VALUES (s32 %x), G_ASHR((S32 %x, 31)) + const MachineInstr *Def = getDefIgnoringCopies(Reg, *MRI); + if (Def->getOpcode() != AMDGPU::G_MERGE_VALUES) + return Register(); + + assert(Def->getNumOperands() == 3 && + MRI->getType(Def->getOperand(0).getReg()) == LLT::scalar(64)); + if (mi_match(Def->getOperand(2).getReg(), *MRI, + m_GAShr(m_SpecificReg(Def->getOperand(1).getReg()), + m_SpecificICst(31)))) + return Def->getOperand(1).getReg(); + + if (VT->signBitIsZero(Reg)) + return matchZeroExtendFromS32(Reg); + + return Register(); +} + +/// Match a zero extend from a 32-bit value to 64-bits, or \p Reg itself if it +/// is 32-bit. +Register +AMDGPUInstructionSelector::matchZeroExtendFromS32OrS32(Register Reg) const { + return MRI->getType(Reg) == LLT::scalar(32) ? Reg + : matchZeroExtendFromS32(Reg); +} + +/// Match a sign extend from a 32-bit value to 64-bits, or \p Reg itself if it +/// is 32-bit. +Register +AMDGPUInstructionSelector::matchSignExtendFromS32OrS32(Register Reg) const { + return MRI->getType(Reg) == LLT::scalar(32) ? Reg + : matchSignExtendFromS32(Reg); +} + +Register +AMDGPUInstructionSelector::matchExtendFromS32OrS32(Register Reg, + bool IsSigned) const { + if (IsSigned) + return matchSignExtendFromS32OrS32(Reg); + + return matchZeroExtendFromS32OrS32(Reg); +} + Register AMDGPUInstructionSelector::matchAnyExtendFromS32(Register Reg) const { Register AnyExtSrc; if (mi_match(Reg, *MRI, m_GAnyExt(m_Reg(AnyExtSrc)))) @@ -3581,7 +3630,7 @@ bool AMDGPUInstructionSelector::selectGlobalLoadLds(MachineInstr &MI) const{ getSrcRegIgnoringCopies(AddrDef->MI->getOperand(1).getReg(), *MRI); if (isSGPR(SAddr)) { Register PtrBaseOffset = AddrDef->MI->getOperand(2).getReg(); - if (Register Off = matchZeroExtendFromS32(*MRI, PtrBaseOffset)) { + if (Register Off = matchZeroExtendFromS32(PtrBaseOffset)) { Addr = SAddr; VOffset = Off; } @@ -5223,7 +5272,7 @@ AMDGPUInstructionSelector::selectSWMMACIndex32(MachineOperand &Root) const { getDefIgnoringCopies(Root.getReg(), *MRI)->getOperand(0).getReg(); unsigned Key = 0; - Register S32 = matchZeroExtendFromS32(*MRI, Src); + Register S32 = matchZeroExtendFromS32(Src); if (!S32) S32 = matchAnyExtendFromS32(Src); @@ -5296,10 +5345,68 @@ AMDGPUInstructionSelector::selectVINTERPModsHi(MachineOperand &Root) const { }}; } +// Given \p Offset and load specified by the \p Root operand check if \p Offset +// is a multiple of the load byte size. If it is update \p Offset to a +// pre-scaled value and return true. 
+bool AMDGPUInstructionSelector::selectScaleOffset(MachineOperand &Root, + Register &Offset, + bool IsSigned) const { + if (!Subtarget->hasScaleOffset()) + return false; + + const MachineInstr &MI = *Root.getParent(); + MachineMemOperand *MMO = *MI.memoperands_begin(); + + if (!MMO->getSize().hasValue()) + return false; + + uint64_t Size = MMO->getSize().getValue(); + + Register OffsetReg = matchExtendFromS32OrS32(Offset, IsSigned); + if (!OffsetReg) + OffsetReg = Offset; + + if (auto Def = getDefSrcRegIgnoringCopies(OffsetReg, *MRI)) + OffsetReg = Def->Reg; + + Register Op0; + MachineInstr *Mul; + bool ScaleOffset = + (isPowerOf2_64(Size) && + mi_match(OffsetReg, *MRI, + m_GShl(m_Reg(Op0), + m_any_of(m_SpecificICst(Log2_64(Size)), + m_Copy(m_SpecificICst(Log2_64(Size))))))) || + mi_match(OffsetReg, *MRI, + m_GMul(m_Reg(Op0), m_any_of(m_SpecificICst(Size), + m_Copy(m_SpecificICst(Size))))) || + mi_match( + OffsetReg, *MRI, + m_BinOp(IsSigned ? AMDGPU::S_MUL_I64_I32_PSEUDO : AMDGPU::S_MUL_U64, + m_Reg(Op0), m_SpecificICst(Size))) || + // Match G_AMDGPU_MAD_U64_U32 offset, c, 0 + (mi_match(OffsetReg, *MRI, m_MInstr(Mul)) && + (Mul->getOpcode() == (IsSigned ? AMDGPU::G_AMDGPU_MAD_I64_I32 + : AMDGPU::G_AMDGPU_MAD_U64_U32) || + (IsSigned && Mul->getOpcode() == AMDGPU::G_AMDGPU_MAD_U64_U32 && + VT->signBitIsZero(Mul->getOperand(2).getReg()))) && + mi_match(Mul->getOperand(4).getReg(), *MRI, m_ZeroInt()) && + mi_match(Mul->getOperand(3).getReg(), *MRI, + m_GTrunc(m_any_of(m_SpecificICst(Size), + m_Copy(m_SpecificICst(Size))))) && + mi_match(Mul->getOperand(2).getReg(), *MRI, m_Reg(Op0))); + + if (ScaleOffset) + Offset = Op0; + + return ScaleOffset; +} + bool AMDGPUInstructionSelector::selectSmrdOffset(MachineOperand &Root, Register &Base, Register *SOffset, - int64_t *Offset) const { + int64_t *Offset, + bool *ScaleOffset) const { MachineInstr *MI = Root.getParent(); MachineBasicBlock *MBB = MI->getParent(); @@ -5314,6 +5421,9 @@ bool AMDGPUInstructionSelector::selectSmrdOffset(MachineOperand &Root, const GEPInfo &GEPI = AddrInfo[0]; std::optional<int64_t> EncodedImm; + if (ScaleOffset) + *ScaleOffset = false; + if (SOffset && Offset) { EncodedImm = AMDGPU::getSMRDEncodedOffset(STI, GEPI.Imm, /*IsBuffer=*/false, /*HasSOffset=*/true); @@ -5321,8 +5431,12 @@ bool AMDGPUInstructionSelector::selectSmrdOffset(MachineOperand &Root, AddrInfo.size() > 1) { const GEPInfo &GEPI2 = AddrInfo[1]; if (GEPI2.SgprParts.size() == 2 && GEPI2.Imm == 0) { - if (Register OffsetReg = - matchZeroExtendFromS32(*MRI, GEPI2.SgprParts[1])) { + Register OffsetReg = GEPI2.SgprParts[1]; + if (ScaleOffset) + *ScaleOffset = + selectScaleOffset(Root, OffsetReg, false /* IsSigned */); + OffsetReg = matchZeroExtendFromS32OrS32(OffsetReg); + if (OffsetReg) { Base = GEPI2.SgprParts[0]; *SOffset = OffsetReg; *Offset = *EncodedImm; @@ -5367,7 +5481,11 @@ bool AMDGPUInstructionSelector::selectSmrdOffset(MachineOperand &Root, } if (SOffset && GEPI.SgprParts.size() && GEPI.Imm == 0) { - if (Register OffsetReg = matchZeroExtendFromS32(*MRI, GEPI.SgprParts[1])) { + Register OffsetReg = GEPI.SgprParts[1]; + if (ScaleOffset) + *ScaleOffset = selectScaleOffset(Root, OffsetReg, false /* IsSigned */); + OffsetReg = matchZeroExtendFromS32OrS32(OffsetReg); + if (OffsetReg) { Base = GEPI.SgprParts[0]; *SOffset = OffsetReg; return true; @@ -5381,7 +5499,8 @@ InstructionSelector::ComplexRendererFns AMDGPUInstructionSelector::selectSmrdImm(MachineOperand &Root) const { Register Base; int64_t Offset; - if (!selectSmrdOffset(Root, Base, /* SOffset= */ 
nullptr, &Offset)) + if (!selectSmrdOffset(Root, Base, /* SOffset= */ nullptr, &Offset, + /* ScaleOffset */ nullptr)) return std::nullopt; return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(Base); }, @@ -5412,23 +5531,30 @@ AMDGPUInstructionSelector::selectSmrdImm32(MachineOperand &Root) const { InstructionSelector::ComplexRendererFns AMDGPUInstructionSelector::selectSmrdSgpr(MachineOperand &Root) const { Register Base, SOffset; - if (!selectSmrdOffset(Root, Base, &SOffset, /* Offset= */ nullptr)) + bool ScaleOffset; + if (!selectSmrdOffset(Root, Base, &SOffset, /* Offset= */ nullptr, + &ScaleOffset)) return std::nullopt; + unsigned CPol = ScaleOffset ? AMDGPU::CPol::SCAL : 0; return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(Base); }, - [=](MachineInstrBuilder &MIB) { MIB.addReg(SOffset); }}}; + [=](MachineInstrBuilder &MIB) { MIB.addReg(SOffset); }, + [=](MachineInstrBuilder &MIB) { MIB.addImm(CPol); }}}; } InstructionSelector::ComplexRendererFns AMDGPUInstructionSelector::selectSmrdSgprImm(MachineOperand &Root) const { Register Base, SOffset; int64_t Offset; - if (!selectSmrdOffset(Root, Base, &SOffset, &Offset)) + bool ScaleOffset; + if (!selectSmrdOffset(Root, Base, &SOffset, &Offset, &ScaleOffset)) return std::nullopt; + unsigned CPol = ScaleOffset ? AMDGPU::CPol::SCAL : 0; return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(Base); }, [=](MachineInstrBuilder &MIB) { MIB.addReg(SOffset); }, - [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); }}}; + [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); }, + [=](MachineInstrBuilder &MIB) { MIB.addImm(CPol); }}}; } std::pair<Register, int> @@ -5565,7 +5691,7 @@ AMDGPUInstructionSelector::selectGlobalSAddr(MachineOperand &Root, // It's possible voffset is an SGPR here, but the copy to VGPR will be // inserted later. - if (Register VOffset = matchZeroExtendFromS32(*MRI, PtrBaseOffset)) { + if (Register VOffset = matchZeroExtendFromS32(PtrBaseOffset)) { return {{[=](MachineInstrBuilder &MIB) { // saddr MIB.addReg(SAddr); }, diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.h b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.h index 34bdf0a..e58fbb4 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.h +++ b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.h @@ -232,8 +232,10 @@ private: InstructionSelector::ComplexRendererFns selectVINTERPModsHi(MachineOperand &Root) const; + bool selectScaleOffset(MachineOperand &Root, Register &Offset, + bool IsSigned) const; bool selectSmrdOffset(MachineOperand &Root, Register &Base, Register *SOffset, - int64_t *Offset) const; + int64_t *Offset, bool *ScaleOffset) const; InstructionSelector::ComplexRendererFns selectSmrdImm(MachineOperand &Root) const; InstructionSelector::ComplexRendererFns @@ -421,6 +423,19 @@ private: // shift amount operand's `ShAmtBits` bits is unneeded. bool isUnneededShiftMask(const MachineInstr &MI, unsigned ShAmtBits) const; + /// Match a zero extend from a 32-bit value to 64-bits. + Register matchZeroExtendFromS32(Register Reg) const; + /// Match a sign extend from a 32-bit value to 64-bits. + Register matchSignExtendFromS32(Register Reg) const; + /// Match a zero extend from a 32-bit value to 64-bits, or \p Reg itself if it + /// is 32-bit. + Register matchZeroExtendFromS32OrS32(Register Reg) const; + /// Match a sign extend from a 32-bit value to 64-bits, or \p Reg itself if it + /// is 32-bit. 
+ Register matchSignExtendFromS32OrS32(Register Reg) const; + /// Match either sign or zero extend depending on \p IsSigned from a + /// 32-bit value to 64-bits, or \p Reg itself if it is 32-bit. + Register matchExtendFromS32OrS32(Register Reg, bool IsSigned) const; /// Match an any extend from a 32-bit value to 64-bit. Register matchAnyExtendFromS32(Register Reg) const; diff --git a/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp b/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp index dc83230..421fc42 100644 --- a/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp +++ b/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp @@ -5139,13 +5139,45 @@ bool AMDGPUAsmParser::validateAGPRLdSt(const MCInst &Inst) const { bool AMDGPUAsmParser::validateVGPRAlign(const MCInst &Inst) const { auto FB = getFeatureBits(); + if (!FB[AMDGPU::FeatureGFX90AInsts] && !FB[AMDGPU::FeatureGFX1250Insts]) + return true; + unsigned Opc = Inst.getOpcode(); + const MCRegisterInfo *MRI = getMRI(); // DS_READ_B96_TR_B6 is the only DS instruction in GFX950, that allows // unaligned VGPR. All others only allow even aligned VGPRs. - if (!(FB[AMDGPU::FeatureGFX90AInsts]) || Opc == AMDGPU::DS_READ_B96_TR_B6_vi) + if (FB[AMDGPU::FeatureGFX90AInsts] && Opc == AMDGPU::DS_READ_B96_TR_B6_vi) return true; - const MCRegisterInfo *MRI = getMRI(); + if (FB[AMDGPU::FeatureGFX1250Insts]) { + switch (Opc) { + default: + break; + case AMDGPU::DS_LOAD_TR6_B96: + case AMDGPU::DS_LOAD_TR6_B96_gfx12: + // DS_LOAD_TR6_B96 is the only DS instruction in GFX1250 that + // allows unaligned VGPR. All others only allow even aligned VGPRs. + return true; + case AMDGPU::GLOBAL_LOAD_TR6_B96: + case AMDGPU::GLOBAL_LOAD_TR6_B96_gfx1250: { + // GLOBAL_LOAD_TR6_B96 is the only GLOBAL instruction in GFX1250 that + // allows unaligned VGPR for vdst, but other operands still only allow + // even aligned VGPRs. 
+ int VAddrIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vaddr); + if (VAddrIdx != -1) { + const MCOperand &Op = Inst.getOperand(VAddrIdx); + MCRegister Sub = MRI->getSubReg(Op.getReg(), AMDGPU::sub0); + if ((Sub - AMDGPU::VGPR0) & 1) + return false; + } + return true; + } + case AMDGPU::GLOBAL_LOAD_TR6_B96_SADDR: + case AMDGPU::GLOBAL_LOAD_TR6_B96_SADDR_gfx1250: + return true; + } + } + const MCRegisterClass &VGPR32 = MRI->getRegClass(AMDGPU::VGPR_32RegClassID); const MCRegisterClass &AGPR32 = MRI->getRegClass(AMDGPU::AGPR_32RegClassID); for (unsigned I = 0, E = Inst.getNumOperands(); I != E; ++I) { @@ -5292,6 +5324,12 @@ bool AMDGPUAsmParser::validateCoherencyBits(const MCInst &Inst, unsigned CPol = Inst.getOperand(CPolPos).getImm(); if (!isGFX1250()) { + if (CPol & CPol::SCAL) { + SMLoc S = getImmLoc(AMDGPUOperand::ImmTyCPol, Operands); + StringRef CStr(S.getPointer()); + S = SMLoc::getFromPointer(&CStr.data()[CStr.find("scale_offset")]); + Error(S, "scale_offset is not supported on this GPU"); + } if (CPol & CPol::NV) { SMLoc S = getImmLoc(AMDGPUOperand::ImmTyCPol, Operands); StringRef CStr(S.getPointer()); @@ -5300,6 +5338,13 @@ bool AMDGPUAsmParser::validateCoherencyBits(const MCInst &Inst, } } + if ((CPol & CPol::SCAL) && !supportsScaleOffset(MII, Inst.getOpcode())) { + SMLoc S = getImmLoc(AMDGPUOperand::ImmTyCPol, Operands); + StringRef CStr(S.getPointer()); + S = SMLoc::getFromPointer(&CStr.data()[CStr.find("scale_offset")]); + Error(S, "scale_offset is not supported for this instruction"); + } + if (isGFX12Plus()) return validateTHAndScopeBits(Inst, Operands, CPol); @@ -6971,6 +7016,7 @@ ParseStatus AMDGPUAsmParser::parseCPol(OperandVector &Operands) { ParseStatus ResTH = ParseStatus::NoMatch; ParseStatus ResScope = ParseStatus::NoMatch; ParseStatus ResNV = ParseStatus::NoMatch; + ParseStatus ResScal = ParseStatus::NoMatch; for (;;) { if (ResTH.isNoMatch()) { @@ -7009,10 +7055,22 @@ ParseStatus AMDGPUAsmParser::parseCPol(OperandVector &Operands) { } } + if (ResScal.isNoMatch()) { + if (trySkipId("scale_offset")) { + ResScal = ParseStatus::Success; + CPolVal |= CPol::SCAL; + continue; + } else if (trySkipId("no", "scale_offset")) { + ResScal = ParseStatus::Success; + continue; + } + } + break; } - if (ResTH.isNoMatch() && ResScope.isNoMatch() && ResNV.isNoMatch()) + if (ResTH.isNoMatch() && ResScope.isNoMatch() && ResNV.isNoMatch() && + ResScal.isNoMatch()) return ParseStatus::NoMatch; Operands.push_back(AMDGPUOperand::CreateImm(this, CPolVal, StringLoc, diff --git a/llvm/lib/Target/AMDGPU/FLATInstructions.td b/llvm/lib/Target/AMDGPU/FLATInstructions.td index f7f29f1..1cc717b 100644 --- a/llvm/lib/Target/AMDGPU/FLATInstructions.td +++ b/llvm/lib/Target/AMDGPU/FLATInstructions.td @@ -2941,6 +2941,7 @@ multiclass VFLAT_Real_gfx12 <bits<8> op, string name = get_FLAT_ps<NAME>.Mnemoni let DecoderNamespace = "GFX12"; let Inst{25-24} = {ps.is_flat_global, ps.is_flat_scratch}; + let Inst{48} = cpol{CPolBit.SCAL}; // scale offset } } @@ -3170,6 +3171,7 @@ multiclass VFLAT_Real_gfx1250<bits<8> op, let DecoderNamespace = "GFX1250"; let Inst{25-24} = {ps.is_flat_global, ps.is_flat_scratch}; + let Inst{48} = cpol{CPolBit.SCAL}; // scale offset } } diff --git a/llvm/lib/Target/AMDGPU/GCNHazardRecognizer.cpp b/llvm/lib/Target/AMDGPU/GCNHazardRecognizer.cpp index c4a3be4..94886b0 100644 --- a/llvm/lib/Target/AMDGPU/GCNHazardRecognizer.cpp +++ b/llvm/lib/Target/AMDGPU/GCNHazardRecognizer.cpp @@ -520,8 +520,8 @@ static int getWaitStatesSince(GCNHazardRecognizer::IsHazardFn IsHazard, const 
MachineInstr *MI, IsExpiredFn IsExpired) { DenseSet<const MachineBasicBlock *> Visited; return getWaitStatesSince(IsHazard, MI->getParent(), - std::next(MI->getReverseIterator()), - 0, IsExpired, Visited); + std::next(MI->getReverseIterator()), 0, IsExpired, + Visited, SIInstrInfo::getNumWaitStates); } int GCNHazardRecognizer::getWaitStatesSince(IsHazardFn IsHazard, int Limit) { @@ -1190,7 +1190,8 @@ void GCNHazardRecognizer::fixHazards(MachineInstr *MI) { fixVALUPartialForwardingHazard(MI); fixVALUTransUseHazard(MI); fixVALUTransCoexecutionHazards(MI); - fixWMMAHazards(MI); + fixWMMAHazards(MI); // fall-through if co-execution is enabled. + fixWMMACoexecutionHazards(MI); fixShift64HighRegBug(MI); fixVALUMaskWriteHazard(MI); fixRequiredExportPriority(MI); @@ -1909,6 +1910,182 @@ bool GCNHazardRecognizer::fixWMMAHazards(MachineInstr *MI) { return true; } +static bool isCoexecutableVALUInst(const MachineInstr &MI) { + return SIInstrInfo::isVALU(MI) && !SIInstrInfo::isTRANS(MI) && + !SIInstrInfo::isWMMA(MI) && !SIInstrInfo::isSWMMAC(MI); // What else? +} + +static bool IsWMMAHazardInstInCategory(const MachineInstr &MI, + const SIInstrInfo *TII, unsigned Latency, + unsigned Category) { + assert(TII->isXDLWMMA(MI) && (Latency == 8 || Latency == 16) && + "Handle me if the xdl wmma instruction latency changes"); + + switch (Category) { + case 0: // Dense WMMA Instructions: + // WMMA_*F16, WMMA_*BF16 + // WMMA_*FP8FP8 + // WMMA_*FP8BF8 + // WMMA_*BF8FP8 + // WMMA_*BF8BF8 + // WMMA_*F8F6F4 if SRCA & SRCB != F8 + return Latency == 8 && SIInstrInfo::isWMMA(MI); + + case 1: // Dense WMMA Instructions: + // WMMA_IU8 + // WMMA_IU4 + // WMMA_*F8F6F4 if SRCA OR SRCB == F8 + return Latency == 16 && SIInstrInfo::isWMMA(MI); + + case 2: // Dense SWMMAC Instructions + // SWMMAC_*F16, SWMMAC_*BF16, + // SWMMAC_*FP8FP8 + // SWMMAC_*BF8FP8 + // SWMMAC_*FP8BF8 + // SWMMAC_*BF8BF8 + return Latency == 8 && SIInstrInfo::isSWMMAC(MI); + + case 3: // Sparse WMMA Instructions: + // SWMMAC_IU8 + // SWMMAC_IU4 + return Latency == 16 && SIInstrInfo::isSWMMAC(MI); + default: + break; + } // end switch. + + return false; +} + +bool GCNHazardRecognizer::fixWMMACoexecutionHazards(MachineInstr *MI) { + if (!AMDGPU::isGFX1250(ST)) + return false; + + const SIInstrInfo *TII = ST.getInstrInfo(); + if (!TII->isXDLWMMA(*MI) && !isCoexecutableVALUInst(*MI)) + return false; + + const SIRegisterInfo *TRI = ST.getRegisterInfo(); + + // WaitStates here is the number of V_NOPs or unrelated VALU instructions that + // must be in between the first WMMA and the second instruction to cover the + // hazard (WMMAWaitStates if the second is also a WMMA, VALUWaitStates if the + // second is a VALU). Refer to SPG 4.6.12.1. "Requirements for WMMA data + // hazards" for numbers, which depend on the category of the first WMMA. + const int WMMAWaitStates[] = {5, 9, 3, 5}; + const int VALUWaitStates[] = {4, 8, 2, 4}; + unsigned Category = 0; + + auto IsWMMAHazardFn = [MI, TII, TRI, &Category, this](const MachineInstr &I) { + if (!TII->isXDLWMMA(I)) + return false; + + unsigned Latency = TSchedModel.computeInstrLatency(&I); + if (!IsWMMAHazardInstInCategory(I, TII, Latency, Category)) + return false; + + Register D0 = TII->getNamedOperand(I, AMDGPU::OpName::vdst)->getReg(); + Register A1 = TII->getNamedOperand(*MI, AMDGPU::OpName::src0)->getReg(); + Register B1 = TII->getNamedOperand(*MI, AMDGPU::OpName::src1)->getReg(); + + // WMMA0 writes (D0), WMMA1 reads (A1/B1/Idx1). 
+ if (TRI->regsOverlap(D0, A1) || TRI->regsOverlap(D0, B1)) + return true; + + if (SIInstrInfo::isSWMMAC(*MI)) { + Register Idx1 = TII->getNamedOperand(*MI, AMDGPU::OpName::src2)->getReg(); + if (TRI->regsOverlap(D0, Idx1)) + return true; + } + + return false; + }; + + auto IsVALUHazardFn = [MI, TII, TRI, &Category, this](const MachineInstr &I) { + if (!TII->isXDLWMMA(I)) + return false; + + unsigned Latency = TSchedModel.computeInstrLatency(&I); + if (!IsWMMAHazardInstInCategory(I, TII, Latency, Category)) + return false; + + // WMMA writes, VALU reads. + Register D0 = TII->getNamedOperand(I, AMDGPU::OpName::vdst)->getReg(); + for (const MachineOperand &ValuUse : MI->explicit_uses()) { + if (ValuUse.isReg() && TRI->regsOverlap(D0, ValuUse.getReg())) + return true; + } + + auto *ValuDst = TII->getNamedOperand(*MI, AMDGPU::OpName::vdst); + if (!ValuDst || !ValuDst->isReg()) + return false; + Register D1 = ValuDst->getReg(); + + // WMMA writes, VALU writes. + if (TRI->regsOverlap(D0, D1)) + return true; + + // WMMA reads, VALU writes. + Register A0 = TII->getNamedOperand(I, AMDGPU::OpName::src0)->getReg(); + Register B0 = TII->getNamedOperand(I, AMDGPU::OpName::src1)->getReg(); + if (TRI->regsOverlap(A0, D1) || TRI->regsOverlap(B0, D1)) + return true; + + if (SIInstrInfo::isSWMMAC(I)) { + Register Idx0 = TII->getNamedOperand(I, AMDGPU::OpName::src2)->getReg(); + if (TRI->regsOverlap(D1, Idx0)) + return true; + } + + return false; + }; + + int Limit = 0; + auto IsExpiredFn = [&Limit](const MachineInstr &, int WaitStates) { + return WaitStates >= Limit; + }; + + auto GetWaitStatesFn = [](const MachineInstr &I) { + return SIInstrInfo::isVALU(I) ? 1 : 0; + }; + + int WaitStatesNeeded = -1; + if (TII->isXDLWMMA(*MI)) { + for (Category = 0; WaitStatesNeeded < 0 && Category < 4; Category++) { + Limit = WMMAWaitStates[Category]; // for IsExpiredFn. + DenseSet<const MachineBasicBlock *> Visited; + // '::getWaitStatesSince' returns the number of VALUs in between if hazard + // exists, and INT_MAX if there is no hazard. As a result, a negative + // WaitStatesNeeded here means no hazard, and we will continue to search + // for other categories. + WaitStatesNeeded = + Limit - ::getWaitStatesSince(IsWMMAHazardFn, MI->getParent(), + std::next(MI->getReverseIterator()), 0, + IsExpiredFn, Visited, GetWaitStatesFn); + } + } else { // Must be a co-executable VALU. + for (Category = 0; WaitStatesNeeded < 0 && Category < 4; Category++) { + Limit = VALUWaitStates[Category]; // for IsExpiredFn. + DenseSet<const MachineBasicBlock *> Visited; + // '::getWaitStatesSince' returns the number of VALUs in between if hazard + // exists, and INT_MAX if there is no hazard. As a result, a negative + // WaitStatesNeeded here means no hazard, and we will continue to search + // for other categories. + WaitStatesNeeded = + Limit - ::getWaitStatesSince(IsVALUHazardFn, MI->getParent(), + std::next(MI->getReverseIterator()), 0, + IsExpiredFn, Visited, GetWaitStatesFn); + } + } + + // WaitStatesNeeded now is the number of V_NOPs we need to insert, negative + // means not needed. 
+ for (int i = 0; i < WaitStatesNeeded; i++) + BuildMI(*MI->getParent(), MI, MI->getDebugLoc(), + TII->get(AMDGPU::V_NOP_e32)); + + return true; +} + bool GCNHazardRecognizer::fixShift64HighRegBug(MachineInstr *MI) { if (!ST.hasShift64HighRegBug()) return false; diff --git a/llvm/lib/Target/AMDGPU/GCNHazardRecognizer.h b/llvm/lib/Target/AMDGPU/GCNHazardRecognizer.h index ef6ddd8..f796eeae 100644 --- a/llvm/lib/Target/AMDGPU/GCNHazardRecognizer.h +++ b/llvm/lib/Target/AMDGPU/GCNHazardRecognizer.h @@ -106,6 +106,7 @@ private: bool fixVALUTransUseHazard(MachineInstr *MI); bool fixVALUTransCoexecutionHazards(MachineInstr *MI); bool fixWMMAHazards(MachineInstr *MI); + bool fixWMMACoexecutionHazards(MachineInstr *MI); bool fixShift64HighRegBug(MachineInstr *MI); bool fixVALUMaskWriteHazard(MachineInstr *MI); bool fixRequiredExportPriority(MachineInstr *MI); diff --git a/llvm/lib/Target/AMDGPU/GCNSubtarget.cpp b/llvm/lib/Target/AMDGPU/GCNSubtarget.cpp index 7b8f0f4..9a2bab1 100644 --- a/llvm/lib/Target/AMDGPU/GCNSubtarget.cpp +++ b/llvm/lib/Target/AMDGPU/GCNSubtarget.cpp @@ -324,7 +324,7 @@ bool GCNSubtarget::zeroesHigh16BitsOfDest(unsigned Opcode) const { } void GCNSubtarget::overrideSchedPolicy(MachineSchedPolicy &Policy, - unsigned NumRegionInstrs) const { + const SchedRegion &Region) const { // Track register pressure so the scheduler can try to decrease // pressure once register usage is above the threshold defined by // SIRegisterInfo::getRegPressureSetLimit() diff --git a/llvm/lib/Target/AMDGPU/GCNSubtarget.h b/llvm/lib/Target/AMDGPU/GCNSubtarget.h index 268162b..407d79a 100644 --- a/llvm/lib/Target/AMDGPU/GCNSubtarget.h +++ b/llvm/lib/Target/AMDGPU/GCNSubtarget.h @@ -1022,7 +1022,7 @@ public: } void overrideSchedPolicy(MachineSchedPolicy &Policy, - unsigned NumRegionInstrs) const override; + const SchedRegion &Region) const override; void mirFileLoaded(MachineFunction &MF) const override; @@ -1162,6 +1162,9 @@ public: bool hasLshlAddU64Inst() const { return HasLshlAddU64Inst; } + // Scalar and global loads support scale_offset bit. + bool hasScaleOffset() const { return GFX1250Insts; } + bool hasFlatGVSMode() const { return FlatGVSMode; } bool enableSIScheduler() const { diff --git a/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUInstPrinter.cpp b/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUInstPrinter.cpp index 197bb3f..11b072e 100644 --- a/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUInstPrinter.cpp +++ b/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUInstPrinter.cpp @@ -157,6 +157,9 @@ void AMDGPUInstPrinter::printCPol(const MCInst *MI, unsigned OpNo, const int64_t TH = Imm & CPol::TH; const int64_t Scope = Imm & CPol::SCOPE; + if (Imm & CPol::SCAL) + O << " scale_offset"; + printTH(MI, TH, Scope, O); printScope(Scope, O); diff --git a/llvm/lib/Target/AMDGPU/SIDefines.h b/llvm/lib/Target/AMDGPU/SIDefines.h index d379088..3902d4c 100644 --- a/llvm/lib/Target/AMDGPU/SIDefines.h +++ b/llvm/lib/Target/AMDGPU/SIDefines.h @@ -402,6 +402,8 @@ enum CPol { SWZ = 1 << 6, // Swizzle bit + SCAL = 1 << 11, // Scale offset bit + ALL = TH | SCOPE, // Helper bits diff --git a/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp b/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp index e172c0b..e5d1eaa 100644 --- a/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp +++ b/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp @@ -1209,18 +1209,24 @@ void SIFoldOperandsImpl::foldOperand( return; } - // A frame index will resolve to a positive constant, so it should always be - // safe to fold the addressing mode, even pre-GFX9. 
- UseMI->getOperand(UseOpIdx).ChangeToFrameIndex(OpToFold.getFI()); - const unsigned Opc = UseMI->getOpcode(); if (TII->isFLATScratch(*UseMI) && AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::vaddr) && !AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::saddr)) { unsigned NewOpc = AMDGPU::getFlatScratchInstSSfromSV(Opc); + unsigned CPol = + TII->getNamedOperand(*UseMI, AMDGPU::OpName::cpol)->getImm(); + if ((CPol & AMDGPU::CPol::SCAL) && + !AMDGPU::supportsScaleOffset(*TII, NewOpc)) + return; + UseMI->setDesc(TII->get(NewOpc)); } + // A frame index will resolve to a positive constant, so it should always be + // safe to fold the addressing mode, even pre-GFX9. + UseMI->getOperand(UseOpIdx).ChangeToFrameIndex(OpToFold.getFI()); + return; } diff --git a/llvm/lib/Target/AMDGPU/SIInstrFormats.td b/llvm/lib/Target/AMDGPU/SIInstrFormats.td index 6b41934..89d9b0d 100644 --- a/llvm/lib/Target/AMDGPU/SIInstrFormats.td +++ b/llvm/lib/Target/AMDGPU/SIInstrFormats.td @@ -318,6 +318,7 @@ def CPolBit { int DLC = 2; int SCC = 4; int NV = 5; + int SCAL = 11; } class VOPDstOperand <RegisterClass rc> : RegisterOperand <rc, "printVOPDst">; diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp index e2a2525..571f3ef 100644 --- a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp +++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp @@ -5482,6 +5482,19 @@ bool SIInstrInfo::verifyInstruction(const MachineInstr &MI, } } + if (const MachineOperand *CPol = getNamedOperand(MI, AMDGPU::OpName::cpol)) { + if (CPol->getImm() & AMDGPU::CPol::SCAL) { + if (!ST.hasScaleOffset()) { + ErrInfo = "Subtarget does not support offset scaling"; + return false; + } + if (!AMDGPU::supportsScaleOffset(*this, MI.getOpcode())) { + ErrInfo = "Instruction does not support offset scaling"; + return false; + } + } + } + return true; } diff --git a/llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp b/llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp index 5097ac03..b49c5a9 100644 --- a/llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp +++ b/llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp @@ -61,6 +61,7 @@ #include "AMDGPU.h" #include "GCNSubtarget.h" #include "MCTargetDesc/AMDGPUMCTargetDesc.h" +#include "SIDefines.h" #include "llvm/Analysis/AliasAnalysis.h" #include "llvm/CodeGen/MachineFunctionPass.h" #include "llvm/InitializePasses.h" @@ -1078,7 +1079,9 @@ bool SILoadStoreOptimizer::offsetsCanBeCombined(CombineInfo &CI, if (EltOffset0 + CI.Width != EltOffset1 && EltOffset1 + Paired.Width != EltOffset0) return false; - if (CI.CPol != Paired.CPol) + // Instructions with the scale_offset modifier cannot be combined unless we + // also generate code to scale the offset and reset that bit. 
+ if (CI.CPol != Paired.CPol || (CI.CPol & AMDGPU::CPol::SCAL)) return false; if (CI.InstClass == S_LOAD_IMM || CI.InstClass == S_BUFFER_LOAD_IMM || CI.InstClass == S_BUFFER_LOAD_SGPR_IMM) { diff --git a/llvm/lib/Target/AMDGPU/SMInstructions.td b/llvm/lib/Target/AMDGPU/SMInstructions.td index d8b52d2..38cc51b 100644 --- a/llvm/lib/Target/AMDGPU/SMInstructions.td +++ b/llvm/lib/Target/AMDGPU/SMInstructions.td @@ -864,8 +864,10 @@ def smrd_prefetch : PatFrag <(ops node:$ptr, node:$rw, node:$loc, node:$type), def SMRDImm : ComplexPattern<iPTR, 2, "SelectSMRDImm">; def SMRDImm32 : ComplexPattern<iPTR, 2, "SelectSMRDImm32">; -def SMRDSgpr : ComplexPattern<iPTR, 2, "SelectSMRDSgpr">; -def SMRDSgprImm : ComplexPattern<iPTR, 3, "SelectSMRDSgprImm">; +let WantsRoot = true in { + def SMRDSgpr : ComplexPattern<iPTR, 3, "SelectSMRDSgpr", [], [], -3>; + def SMRDSgprImm : ComplexPattern<iPTR, 4, "SelectSMRDSgprImm", [], []>; +} def SMRDBufferImm : ComplexPattern<iPTR, 1, "SelectSMRDBufferImm">; def SMRDBufferImm32 : ComplexPattern<iPTR, 1, "SelectSMRDBufferImm32">; def SMRDBufferSgprImm : ComplexPattern<iPTR, 2, "SelectSMRDBufferSgprImm">; @@ -906,15 +908,15 @@ multiclass SMRD_Patterns <string Instr, ValueType vt, PatFrag frag, let SubtargetPredicate = isNotGFX9Plus; } def : GCNPat < - (frag (SMRDSgpr i64:$sbase, i32:$soffset)), - (vt (!cast<SM_Pseudo>(Instr#"_SGPR_IMM"#suffix) $sbase, $soffset, 0, 0))> { + (frag (SMRDSgpr i64:$sbase, i32:$soffset, CPol:$cpol)), + (vt (!cast<SM_Pseudo>(Instr#"_SGPR_IMM"#suffix) $sbase, $soffset, 0, $cpol))> { let SubtargetPredicate = isGFX9Plus; } // 4. SGPR+IMM offset def : GCNPat < - (frag (SMRDSgprImm i64:$sbase, i32:$soffset, i32:$offset)), - (vt (!cast<SM_Pseudo>(Instr#"_SGPR_IMM"#suffix) $sbase, $soffset, $offset, 0))> { + (frag (SMRDSgprImm i64:$sbase, i32:$soffset, i32:$offset, CPol:$cpol)), + (vt (!cast<SM_Pseudo>(Instr#"_SGPR_IMM"#suffix) $sbase, $soffset, $offset, $cpol))> { let SubtargetPredicate = isGFX9Plus; } @@ -989,15 +991,15 @@ multiclass ScalarLoadWithExtensionPat <string Instr, SDPatternOperator node, Val // 2. SGPR offset def : GCNPat < - (node (SMRDSgpr i64:$sbase, i32:$soffset)), - (vt (!cast<SM_Pseudo>(Instr#"_SGPR_IMM") $sbase, $soffset, 0, 0))>{ + (node (SMRDSgpr i64:$sbase, i32:$soffset, CPol:$cpol)), + (vt (!cast<SM_Pseudo>(Instr#"_SGPR_IMM") $sbase, $soffset, 0, $cpol))>{ let SubtargetPredicate = isGFX12Plus; } // 3. 
SGPR+IMM offset def : GCNPat < - (node (SMRDSgprImm i64:$sbase, i32:$soffset, i32:$offset)), - (vt (!cast<SM_Pseudo>(Instr#"_SGPR_IMM") $sbase, $soffset, $offset, 0))>{ + (node (SMRDSgprImm i64:$sbase, i32:$soffset, i32:$offset, CPol:$cpol)), + (vt (!cast<SM_Pseudo>(Instr#"_SGPR_IMM") $sbase, $soffset, $offset, $cpol))>{ let SubtargetPredicate = isGFX12Plus; } @@ -1488,6 +1490,7 @@ class SMEM_Real_Load_gfx12<bits<6> op, string ps, string opName, OffsetMode offs let Inst{20} = cpol{CPolBit.NV}; // non-volatile let Inst{22-21} = cpol{4-3}; // scope let Inst{24-23} = cpol{1-0}; // th - only lower 2 bits are supported + let Inst{56} = cpol{CPolBit.SCAL}; // scale offset } multiclass SM_Real_Loads_gfx12<bits<6> op, string ps = NAME> { diff --git a/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp b/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp index 9c6c374..b5b3cc9 100644 --- a/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp +++ b/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp @@ -3228,6 +3228,23 @@ const GcnBufferFormatInfo *getGcnBufferFormatInfo(uint8_t Format, : getGfx9BufferFormatInfo(Format); } +bool supportsScaleOffset(const MCInstrInfo &MII, unsigned Opcode) { + uint64_t TSFlags = MII.get(Opcode).TSFlags; + + if (TSFlags & SIInstrFlags::SMRD) + return !getSMEMIsBuffer(Opcode); + if (!(TSFlags & SIInstrFlags::FLAT)) + return false; + + // Only SV and SVS modes are supported. + if (TSFlags & SIInstrFlags::FlatScratch) + return hasNamedOperand(Opcode, OpName::vaddr); + + // Only GVS mode is supported. + return hasNamedOperand(Opcode, OpName::vaddr) && + hasNamedOperand(Opcode, OpName::saddr); +} + bool hasAny64BitVGPROperands(const MCInstrDesc &OpDesc) { for (auto OpName : {OpName::vdst, OpName::src0, OpName::src1, OpName::src2}) { int Idx = getNamedOperandIdx(OpDesc.getOpcode(), OpName); diff --git a/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h b/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h index bde951b..c09a9d6 100644 --- a/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h +++ b/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h @@ -1757,6 +1757,9 @@ bool isIntrinsicSourceOfDivergence(unsigned IntrID); /// \returns true if the intrinsic is uniform bool isIntrinsicAlwaysUniform(unsigned IntrID); +/// \returns true if a memory instruction supports the scale_offset modifier. +bool supportsScaleOffset(const MCInstrInfo &MII, unsigned Opcode); + /// \returns lds block size in terms of dwords. \p /// This is used to calculate the lds size encoded for PAL metadata 3.0+ which /// must be defined in terms of bytes. 
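Taken together, the AMDGPU changes above wire one feature through the whole stack: on subtargets where hasScaleOffset() is true, a scalar or global load whose byte offset is computed as index * load-size can drop the explicit multiply and instead set the SCAL cache-policy bit (parsed and printed as "scale_offset"), letting the hardware scale the offset by the access size. The following is a minimal standalone sketch of just the matching rule, assuming a toy expression type rather than SDValues or MIR; OffsetExpr and matchScaleOffset are invented names for illustration, and the in-tree logic is the SelectScaleOffset/selectScaleOffset code in the hunks above.

    #include <cstdint>

    // Toy offset expression: a plain index, Index * Imm, or Index << Imm.
    struct OffsetExpr {
      enum Kind { Leaf, Mul, Shl } K = Leaf;
      const OffsetExpr *Scaled = nullptr; // the index being scaled
      uint64_t Imm = 0;                   // multiplier or shift amount
    };

    // Return the unscaled index when Offset is provably Index * LoadSize,
    // mirroring the two patterns the patch matches: a left shift by
    // log2(LoadSize) when the size is a power of two, or an explicit
    // multiply by LoadSize. On success the caller would emit the memory
    // instruction with CPol::SCAL set and use the returned sub-expression
    // as the offset operand.
    const OffsetExpr *matchScaleOffset(const OffsetExpr &Offset,
                                       uint64_t LoadSize) {
      if (Offset.K == OffsetExpr::Shl && Offset.Imm < 64 &&
          (uint64_t(1) << Offset.Imm) == LoadSize)
        return Offset.Scaled;
      if (Offset.K == OffsetExpr::Mul && Offset.Imm == LoadSize)
        return Offset.Scaled;
      return nullptr;
    }

This view also explains two side conditions visible in the hunks: SILoadStoreOptimizer refuses to merge accesses whose CPol carries SCAL, since pairing changes the effective access size and a pre-scaled offset would no longer be a multiple of it, and the new extend matchers (matchExtFromI32orI32, matchExtendFromS32OrS32) exist because the 32-bit index usually reaches the 64-bit address computation through a zero or sign extend that has to be looked through before the shift or multiply becomes visible.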
diff --git a/llvm/lib/Target/DirectX/DXILPrepare.cpp b/llvm/lib/Target/DirectX/DXILPrepare.cpp index 703a9e5..c8866bf 100644 --- a/llvm/lib/Target/DirectX/DXILPrepare.cpp +++ b/llvm/lib/Target/DirectX/DXILPrepare.cpp @@ -24,7 +24,6 @@ #include "llvm/IR/AttributeMask.h" #include "llvm/IR/IRBuilder.h" #include "llvm/IR/Instruction.h" -#include "llvm/IR/IntrinsicInst.h" #include "llvm/IR/Module.h" #include "llvm/InitializePasses.h" #include "llvm/Pass.h" @@ -240,11 +239,6 @@ public: for (size_t Idx = 0, End = F.arg_size(); Idx < End; ++Idx) F.removeParamAttrs(Idx, AttrMask); - // Lifetime intrinsics in LLVM 3.7 do not have the memory FnAttr - if (Intrinsic::ID IID = F.getIntrinsicID(); - IID == Intrinsic::lifetime_start || IID == Intrinsic::lifetime_end) - F.removeFnAttr(Attribute::Memory); - for (auto &BB : F) { IRBuilder<> Builder(&BB); for (auto &I : make_early_inc_range(BB)) { @@ -253,7 +247,7 @@ public: // Emtting NoOp bitcast instructions allows the ValueEnumerator to be // unmodified as it reserves instruction IDs during contruction. if (auto *LI = dyn_cast<LoadInst>(&I)) { if (Value *NoOpBitcast = maybeGenerateBitcast( Builder, PointerTypes, I, LI->getPointerOperand(), LI->getType())) { @@ -263,7 +257,7 @@ public: } continue; } if (auto *SI = dyn_cast<StoreInst>(&I)) { if (Value *NoOpBitcast = maybeGenerateBitcast( Builder, PointerTypes, I, SI->getPointerOperand(), SI->getValueOperand()->getType())) { @@ -274,7 +268,7 @@ public: } continue; } if (auto *GEP = dyn_cast<GetElementPtrInst>(&I)) { if (Value *NoOpBitcast = maybeGenerateBitcast( Builder, PointerTypes, I, GEP->getPointerOperand(), GEP->getSourceElementType())) @@ -286,17 +280,6 @@ public: CB->removeRetAttrs(AttrMask); for (size_t Idx = 0, End = CB->arg_size(); Idx < End; ++Idx) CB->removeParamAttrs(Idx, AttrMask); - // LLVM 3.7 Lifetime intrinics require an i8* pointer operand, so we - // insert a bitcast here to ensure that is the case - if (isa<LifetimeIntrinsic>(CB)) { - Value *PtrOperand = CB->getArgOperand(1); - Builder.SetInsertPoint(CB); - PointerType *PtrTy = cast<PointerType>(PtrOperand->getType()); - Value *NoOpBitcast = Builder.Insert( - CastInst::Create(Instruction::BitCast, PtrOperand, - Builder.getPtrTy(PtrTy->getAddressSpace()))); - CB->setArgOperand(1, NoOpBitcast); - } continue; } } diff --git a/llvm/lib/Target/DirectX/DXILShaderFlags.cpp b/llvm/lib/Target/DirectX/DXILShaderFlags.cpp index eb4adfe..bd3349d 100644 --- a/llvm/lib/Target/DirectX/DXILShaderFlags.cpp +++ b/llvm/lib/Target/DirectX/DXILShaderFlags.cpp @@ -152,7 +152,7 @@ void ModuleShaderFlags::updateFunctionFlags(ComputedShaderFlags &CSF, if (!CSF.Int64Ops) CSF.Int64Ops = I.getType()->isIntegerTy(64); - if (!CSF.Int64Ops && !isa<LifetimeIntrinsic>(&I)) { + if (!CSF.Int64Ops) { for (const Value *Op : I.operands()) { if (Op->getType()->isIntegerTy(64)) { CSF.Int64Ops = true; diff --git a/llvm/lib/Target/DirectX/DXILWriter/DXILBitcodeWriter.cpp b/llvm/lib/Target/DirectX/DXILWriter/DXILBitcodeWriter.cpp index 46d5d71..1d79c30 100644 --- a/llvm/lib/Target/DirectX/DXILWriter/DXILBitcodeWriter.cpp +++ b/llvm/lib/Target/DirectX/DXILWriter/DXILBitcodeWriter.cpp @@ -2545,25 +2545,6 @@ void DXILBitcodeWriter::writeInstruction(const Instruction &I, unsigned InstID, Vals.clear(); } -// HLSL Change -namespace { -struct ValueNameCreator { - MallocAllocator Allocator; - SmallVector<ValueName *, 2> - ValueNames; // 
SmallVector N = 2 because we currently only expect this - // to hold ValueNames for Lifetime intrinsics - ~ValueNameCreator() { - for (auto *VN : ValueNames) - VN->Destroy(Allocator); - } - ValueName *create(StringRef Name, Value *V) { - ValueName *VN = ValueName::create(Name, Allocator, V); - ValueNames.push_back(VN); - return VN; - } -}; -} // anonymous namespace - // Emit names for globals/functions etc. void DXILBitcodeWriter::writeFunctionLevelValueSymbolTable( const ValueSymbolTable &VST) { @@ -2578,24 +2559,9 @@ void DXILBitcodeWriter::writeFunctionLevelValueSymbolTable( // to ensure the binary is the same no matter what values ever existed. SmallVector<const ValueName *, 16> SortedTable; - // HLSL Change - ValueNameCreator VNC; for (auto &VI : VST) { - ValueName *VN = VI.second->getValueName(); - // Clang mangles lifetime intrinsic names by appending '.p0' to the end, - // making them invalid lifetime intrinsics in LLVM 3.7. We can't - // demangle in dxil-prepare because it would result in invalid IR. - // Therefore we have to do this in the bitcode writer while writing its - // name to the symbol table. - if (const Function *Fn = dyn_cast<Function>(VI.getValue()); - Fn && Fn->isIntrinsic()) { - Intrinsic::ID IID = Fn->getIntrinsicID(); - if (IID == Intrinsic::lifetime_start || IID == Intrinsic::lifetime_end) - VN = VNC.create(Intrinsic::getBaseName(IID), VI.second); - } - SortedTable.push_back(VN); + SortedTable.push_back(VI.second->getValueName()); } - // The keys are unique, so there shouldn't be stability issues. llvm::sort(SortedTable, [](const ValueName *A, const ValueName *B) { return A->first() < B->first(); diff --git a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp index a31fa57..e915a3c4 100644 --- a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp +++ b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp @@ -2514,8 +2514,9 @@ SDValue LoongArchTargetLowering::lowerBUILD_VECTOR(SDValue Op, assert(ResTy.isVector()); unsigned NumElts = ResTy.getVectorNumElements(); - SDValue Vector = DAG.getUNDEF(ResTy); - for (unsigned i = 0; i < NumElts; ++i) { + SDValue Vector = + DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, ResTy, Node->getOperand(0)); + for (unsigned i = 1; i < NumElts; ++i) { Vector = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, ResTy, Vector, Node->getOperand(i), DAG.getConstant(i, DL, Subtarget.getGRLenVT())); diff --git a/llvm/lib/Target/LoongArch/LoongArchLASXInstrInfo.td b/llvm/lib/Target/LoongArch/LoongArchLASXInstrInfo.td index a0107e4..5096a8f 100644 --- a/llvm/lib/Target/LoongArch/LoongArchLASXInstrInfo.td +++ b/llvm/lib/Target/LoongArch/LoongArchLASXInstrInfo.td @@ -1651,18 +1651,20 @@ def : Pat<(vector_insert v8i32:$xd, GRLenVT:$rj, uimm3:$imm), (XVINSGR2VR_W v8i32:$xd, GRLenVT:$rj, uimm3:$imm)>; def : Pat<(vector_insert v4i64:$xd, GRLenVT:$rj, uimm2:$imm), (XVINSGR2VR_D v4i64:$xd, GRLenVT:$rj, uimm2:$imm)>; -def : Pat<(vector_insert v8f32:$vd, (loongarch_movgr2fr_w_la64 GPR:$rj), uimm3:$imm), - (XVINSGR2VR_W $vd, $rj, uimm3:$imm)>; -def : Pat<(vector_insert v4f64:$vd, (f64 (bitconvert i64:$rj)), uimm2:$imm), - (XVINSGR2VR_D $vd, $rj, uimm2:$imm)>; +def : Pat<(vector_insert v8f32:$xd, (loongarch_movgr2fr_w_la64 GPR:$rj), uimm3:$imm), + (XVINSGR2VR_W $xd, $rj, uimm3:$imm)>; +def : Pat<(vector_insert v4f64:$xd, (f64 (bitconvert i64:$rj)), uimm2:$imm), + (XVINSGR2VR_D $xd, $rj, uimm2:$imm)>; def : Pat<(vector_insert v8f32:$xd, (f32 (vector_extract v8f32:$xj, uimm3:$imm1)), uimm3:$imm2), (XVINSGR2VR_W $xd, 
(XVPICKVE2GR_W v8f32:$xj, uimm3:$imm1), uimm3:$imm2)>; def : Pat<(vector_insert v4f64:$xd, (f64 (vector_extract v4f64:$xj, uimm2:$imm1)), uimm2:$imm2), (XVINSGR2VR_D $xd, (XVPICKVE2GR_D v4f64:$xj, uimm2:$imm1), uimm2:$imm2)>; + +// XVINSVE0_{W/D} def : Pat<(vector_insert v8f32:$xd, FPR32:$fj, uimm3:$imm), - (XVINSGR2VR_W $xd, (COPY_TO_REGCLASS FPR32:$fj, GPR), uimm3:$imm)>; + (XVINSVE0_W $xd, (SUBREG_TO_REG (i64 0), FPR32:$fj, sub_32), uimm3:$imm)>; def : Pat<(vector_insert v4f64:$xd, FPR64:$fj, uimm2:$imm), - (XVINSGR2VR_D $xd, (COPY_TO_REGCLASS FPR64:$fj, GPR), uimm2:$imm)>; + (XVINSVE0_D $xd, (SUBREG_TO_REG (i64 0), FPR64:$fj, sub_64), uimm2:$imm)>; // scalar_to_vector def : Pat<(v8f32 (scalar_to_vector FPR32:$fj)), diff --git a/llvm/lib/Target/LoongArch/LoongArchLSXInstrInfo.td b/llvm/lib/Target/LoongArch/LoongArchLSXInstrInfo.td index 962e7c2..3c9defb 100644 --- a/llvm/lib/Target/LoongArch/LoongArchLSXInstrInfo.td +++ b/llvm/lib/Target/LoongArch/LoongArchLSXInstrInfo.td @@ -1842,10 +1842,19 @@ def : Pat<(vector_insert v4f32:$vd, (loongarch_movgr2fr_w_la64 GPR:$rj), uimm2:$ (VINSGR2VR_W $vd, $rj, uimm2:$imm)>; def : Pat<(vector_insert v2f64:$vd, (f64 (bitconvert i64:$rj)), uimm1:$imm), (VINSGR2VR_D $vd, $rj, uimm1:$imm)>; -def : Pat<(vector_insert v4f32:$vd, FPR32:$fj, uimm2:$imm), - (VINSGR2VR_W $vd, (COPY_TO_REGCLASS FPR32:$fj, GPR), uimm2:$imm)>; -def : Pat<(vector_insert v2f64:$vd, FPR64:$fj, uimm1:$imm), - (VINSGR2VR_D $vd, (COPY_TO_REGCLASS FPR64:$fj, GPR), uimm1:$imm)>; + +// VEXTRINS_{W/D} +foreach imm = 0...3 in { + defvar Imm = !shl(imm, 4); + def : Pat<(vector_insert v4f32:$vd, FPR32:$fj, imm), + (VEXTRINS_W $vd, (SUBREG_TO_REG (i64 0), FPR32:$fj, sub_32), Imm)>; +} + +foreach imm = 0...1 in { + defvar Imm = !shl(imm, 4); + def : Pat<(vector_insert v2f64:$vd, FPR64:$fj, imm), + (VEXTRINS_D $vd, (SUBREG_TO_REG (i64 0), FPR64:$fj, sub_64), Imm)>; +} // scalar_to_vector def : Pat<(v4f32 (scalar_to_vector FPR32:$fj)), diff --git a/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchBaseInfo.cpp b/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchBaseInfo.cpp index 03ce004..7cefb3f 100644 --- a/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchBaseInfo.cpp +++ b/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchBaseInfo.cpp @@ -52,6 +52,9 @@ static ABI getTripleABI(const Triple &TT) { bool Is64Bit = TT.isArch64Bit(); ABI TripleABI; switch (TT.getEnvironment()) { + case llvm::Triple::EnvironmentType::UnknownEnvironment: + TripleABI = ABI_Unknown; + break; case llvm::Triple::EnvironmentType::GNUSF: case llvm::Triple::EnvironmentType::MuslSF: TripleABI = Is64Bit ? ABI_LP64S : ABI_ILP32S; @@ -96,7 +99,7 @@ ABI computeTargetABI(const Triple &TT, const FeatureBitset &FeatureBits, // 1. If the '-target-abi' is valid, use it. if (IsABIValidForFeature(ArgProvidedABI)) { - if (TT.hasEnvironment() && ArgProvidedABI != TripleABI) + if (IsABIValidForFeature(TripleABI) && ArgProvidedABI != TripleABI) errs() << "warning: triple-implied ABI conflicts with provided target-abi '" << ABIName << "', using target-abi\n"; @@ -164,10 +167,7 @@ ABI computeTargetABI(const Triple &TT, const FeatureBitset &FeatureBits, return Is64Bit ? ABI_LP64F : ABI_ILP32F; return Is64Bit ? 
ABI_LP64S : ABI_ILP32S; }; - if (ABIName.empty()) - errs() << "warning: the triple-implied ABI is invalid, ignoring and using " - "feature-implied ABI\n"; - else + if (!ABIName.empty()) errs() << "warning: both target-abi and the triple-implied ABI are " "invalid, ignoring and using feature-implied ABI\n"; return checkABIStandardized(GetFeatureABI()); diff --git a/llvm/lib/Target/Mips/MCTargetDesc/MipsELFObjectWriter.cpp b/llvm/lib/Target/Mips/MCTargetDesc/MipsELFObjectWriter.cpp index ad8f5f0..7abe9c9 100644 --- a/llvm/lib/Target/Mips/MCTargetDesc/MipsELFObjectWriter.cpp +++ b/llvm/lib/Target/Mips/MCTargetDesc/MipsELFObjectWriter.cpp @@ -385,11 +385,12 @@ void MipsELFObjectWriter::sortRelocs(std::vector<ELFRelocationEntry> &Relocs) { if (hasRelocationAddend()) return; - // Sort relocations by the address they are applied to. - llvm::sort(Relocs, - [](const ELFRelocationEntry &A, const ELFRelocationEntry &B) { - return A.Offset < B.Offset; - }); + // Sort relocations by r_offset. There might be more than one at an offset + // with composed relocations or .reloc directives. + llvm::stable_sort( + Relocs, [](const ELFRelocationEntry &A, const ELFRelocationEntry &B) { + return A.Offset < B.Offset; + }); // Place relocations in a list for reorder convenience. Hi16 contains the // iterators of high-part relocations. diff --git a/llvm/lib/Target/PowerPC/PPCInstrFuture.td b/llvm/lib/Target/PowerPC/PPCInstrFuture.td index 1ac91fa..80fac18 100644 --- a/llvm/lib/Target/PowerPC/PPCInstrFuture.td +++ b/llvm/lib/Target/PowerPC/PPCInstrFuture.td @@ -53,34 +53,30 @@ let Predicates = [IsISAFuture] in { let Predicates = [HasVSX, IsISAFuture] in { let mayLoad = 1 in { - def LXVRL : XX1Form_memOp<31, 525, (outs vsrc:$XT), (ins memr:$RA, g8rc:$RB), - "lxvrl $XT, $RA, $RB", IIC_LdStLoad, []>; - - def LXVRLL : XX1Form_memOp<31, 557, (outs vsrc:$XT), (ins memr:$RA, g8rc:$RB), - "lxvrll $XT, $RA, $RB", IIC_LdStLoad, []>; - - def LXVPRL : XForm_XTp5_XAB5<31, 589, (outs vsrprc:$XTp), - (ins memr:$RA, g8rc:$RB), - "lxvprl $XTp, $RA, $RB", IIC_LdStLFD, []>; - - def LXVPRLL : XForm_XTp5_XAB5<31, 621, (outs vsrprc:$XTp), - (ins memr:$RA, g8rc:$RB), - "lxvprll $XTp, $RA, $RB", IIC_LdStLFD, []>; + def LXVRL + : XX1Form_memOp<31, 525, (outs vsrc:$XT), (ins memr:$RA, g8rc:$RB), + "lxvrl $XT, $RA, $RB", IIC_LdStLoad, []>; + def LXVRLL + : XX1Form_memOp<31, 557, (outs vsrc:$XT), (ins memr:$RA, g8rc:$RB), + "lxvrll $XT, $RA, $RB", IIC_LdStLoad, []>; + def LXVPRL + : XForm_XTp5_XAB5<31, 589, (outs vsrprc:$XTp), (ins memr:$RA, g8rc:$RB), + "lxvprl $XTp, $RA, $RB", IIC_LdStLFD, []>; + def LXVPRLL + : XForm_XTp5_XAB5<31, 621, (outs vsrprc:$XTp), (ins memr:$RA, g8rc:$RB), + "lxvprll $XTp, $RA, $RB", IIC_LdStLFD, []>; } let mayStore = 1 in { - def STXVRL : XX1Form_memOp<31, 653, (outs), - (ins vsrc:$XT, memr:$RA, g8rc:$RB), - "stxvrl $XT, $RA, $RB", IIC_LdStLoad, []>; - - def STXVRLL : XX1Form_memOp<31, 685, (outs), - (ins vsrc:$XT, memr:$RA, g8rc:$RB), - "stxvrll $XT, $RA, $RB", IIC_LdStLoad, []>; - + def STXVRL + : XX1Form_memOp<31, 653, (outs), (ins vsrc:$XT, memr:$RA, g8rc:$RB), + "stxvrl $XT, $RA, $RB", IIC_LdStLoad, []>; + def STXVRLL + : XX1Form_memOp<31, 685, (outs), (ins vsrc:$XT, memr:$RA, g8rc:$RB), + "stxvrll $XT, $RA, $RB", IIC_LdStLoad, []>; def STXVPRL : XForm_XTp5_XAB5<31, 717, (outs), (ins vsrprc:$XTp, memr:$RA, g8rc:$RB), "stxvprl $XTp, $RA, $RB", IIC_LdStLFD, []>; - def STXVPRLL : XForm_XTp5_XAB5<31, 749, (outs), (ins vsrprc:$XTp, memr:$RA, g8rc:$RB), "stxvprll $XTp, $RA, $RB", IIC_LdStLFD, []>; diff --git 
a/llvm/lib/Target/PowerPC/PPCSubtarget.cpp b/llvm/lib/Target/PowerPC/PPCSubtarget.cpp index 75a0272..996b6ef 100644 --- a/llvm/lib/Target/PowerPC/PPCSubtarget.cpp +++ b/llvm/lib/Target/PowerPC/PPCSubtarget.cpp @@ -171,7 +171,7 @@ void PPCSubtarget::getCriticalPathRCs(RegClassVector &CriticalPathRCs) const { } void PPCSubtarget::overrideSchedPolicy(MachineSchedPolicy &Policy, - unsigned NumRegionInstrs) const { + const SchedRegion &Region) const { // The GenericScheduler that we use defaults to scheduling bottom up only. // We want to schedule from both the top and the bottom and so we set // OnlyBottomUp to false. diff --git a/llvm/lib/Target/PowerPC/PPCSubtarget.h b/llvm/lib/Target/PowerPC/PPCSubtarget.h index 9a97d1a..3c59a47 100644 --- a/llvm/lib/Target/PowerPC/PPCSubtarget.h +++ b/llvm/lib/Target/PowerPC/PPCSubtarget.h @@ -240,7 +240,8 @@ public: void getCriticalPathRCs(RegClassVector &CriticalPathRCs) const override; void overrideSchedPolicy(MachineSchedPolicy &Policy, - unsigned NumRegionInstrs) const override; + const SchedRegion &Region) const override; + bool useAA() const override; bool enableSubRegLiveness() const override; diff --git a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.cpp b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.cpp index aeda5ac..5abb546 100644 --- a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.cpp +++ b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.cpp @@ -52,15 +52,6 @@ namespace RISCV { #include "RISCVGenSearchableTables.inc" } // namespace RISCV -// Report an error but don't ask the user to report a bug. -// TODO: Remove these wrappers. -[[noreturn]] static void reportError(const char *Reason) { - reportFatalUsageError(Reason); -} -[[noreturn]] static void reportError(Error Err) { - reportFatalUsageError(std::move(Err)); -} - namespace RISCVABI { ABI computeTargetABI(const Triple &TT, const FeatureBitset &FeatureBits, StringRef ABIName) { @@ -97,7 +88,7 @@ ABI computeTargetABI(const Triple &TT, const FeatureBitset &FeatureBits, if ((TargetABI == RISCVABI::ABI::ABI_ILP32E || (TargetABI == ABI_Unknown && IsRVE && !IsRV64)) && FeatureBits[RISCV::FeatureStdExtD]) - reportError("ILP32E cannot be used with the D ISA extension"); + reportFatalUsageError("ILP32E cannot be used with the D ISA extension"); if (TargetABI != ABI_Unknown) return TargetABI; @@ -105,7 +96,7 @@ ABI computeTargetABI(const Triple &TT, const FeatureBitset &FeatureBits, // If no explicit ABI is given, try to compute the default ABI. 
auto ISAInfo = RISCVFeatures::parseFeatureBits(IsRV64, FeatureBits); if (!ISAInfo) - reportError(ISAInfo.takeError()); + reportFatalUsageError(ISAInfo.takeError()); return getTargetABI((*ISAInfo)->computeDefaultABI()); } @@ -137,12 +128,12 @@ namespace RISCVFeatures { void validate(const Triple &TT, const FeatureBitset &FeatureBits) { if (TT.isArch64Bit() && !FeatureBits[RISCV::Feature64Bit]) - reportError("RV64 target requires an RV64 CPU"); + reportFatalUsageError("RV64 target requires an RV64 CPU"); if (!TT.isArch64Bit() && !FeatureBits[RISCV::Feature32Bit]) - reportError("RV32 target requires an RV32 CPU"); + reportFatalUsageError("RV32 target requires an RV32 CPU"); if (FeatureBits[RISCV::Feature32Bit] && FeatureBits[RISCV::Feature64Bit]) - reportError("RV32 and RV64 can't be combined"); + reportFatalUsageError("RV32 and RV64 can't be combined"); } llvm::Expected<std::unique_ptr<RISCVISAInfo>> diff --git a/llvm/lib/Target/RISCV/RISCVCallingConv.td b/llvm/lib/Target/RISCV/RISCVCallingConv.td index cbf039e..4c303a9 100644 --- a/llvm/lib/Target/RISCV/RISCVCallingConv.td +++ b/llvm/lib/Target/RISCV/RISCVCallingConv.td @@ -56,19 +56,21 @@ def CSR_XLEN_F32_Interrupt: CalleeSavedRegs<(add CSR_Interrupt, def CSR_XLEN_F64_Interrupt: CalleeSavedRegs<(add CSR_Interrupt, (sequence "F%u_D", 0, 31))>; +defvar VREGS = (add (sequence "V%u", 0, 31), + (sequence "V%uM2", 0, 31, 2), + (sequence "V%uM4", 0, 31, 4), + (sequence "V%uM8", 0, 31, 8)); + // Same as CSR_Interrupt, but including all vector registers. -def CSR_XLEN_V_Interrupt: CalleeSavedRegs<(add CSR_Interrupt, - (sequence "V%u", 0, 31))>; +def CSR_XLEN_V_Interrupt: CalleeSavedRegs<(add CSR_Interrupt, VREGS)>; // Same as CSR_Interrupt, but including all 32-bit FP registers and all vector // registers. -def CSR_XLEN_F32_V_Interrupt: CalleeSavedRegs<(add CSR_XLEN_F32_Interrupt, - (sequence "V%u", 0, 31))>; +def CSR_XLEN_F32_V_Interrupt: CalleeSavedRegs<(add CSR_XLEN_F32_Interrupt, VREGS)>; // Same as CSR_Interrupt, but including all 64-bit FP registers and all vector // registers. -def CSR_XLEN_F64_V_Interrupt: CalleeSavedRegs<(add CSR_XLEN_F64_Interrupt, - (sequence "V%u", 0, 31))>; +def CSR_XLEN_F64_V_Interrupt: CalleeSavedRegs<(add CSR_XLEN_F64_Interrupt, VREGS)>; // Same as CSR_Interrupt, but excluding X16-X31. def CSR_Interrupt_RVE : CalleeSavedRegs<(sub CSR_Interrupt, diff --git a/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp b/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp index 23b4554..b1ab76a 100644 --- a/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp +++ b/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp @@ -1544,10 +1544,53 @@ RISCVFrameLowering::getFrameIndexReference(const MachineFunction &MF, int FI, return Offset; } +static MCRegister getRVVBaseRegister(const RISCVRegisterInfo &TRI, + const Register &Reg) { + MCRegister BaseReg = TRI.getSubReg(Reg, RISCV::sub_vrm1_0); + // If it's not a grouped vector register, it doesn't have a subregister, so + // the base register is just itself. + if (BaseReg == RISCV::NoRegister) + BaseReg = Reg; + return BaseReg; +} + void RISCVFrameLowering::determineCalleeSaves(MachineFunction &MF, BitVector &SavedRegs, RegScavenger *RS) const { TargetFrameLowering::determineCalleeSaves(MF, SavedRegs, RS); + + // In TargetFrameLowering::determineCalleeSaves, any vector register is marked + // as saved if any of its subregisters is clobbered; this is not correct for + // vector registers. We only want the vector register to be marked as saved + // if all of its subregisters are clobbered. 
+ // For example: + // Original behavior: If v24 is marked, v24m2, v24m4, v24m8 are also marked. + // Correct behavior: v24m2 is marked only if v24 and v25 are marked. + const MachineRegisterInfo &MRI = MF.getRegInfo(); + const MCPhysReg *CSRegs = MRI.getCalleeSavedRegs(); + const RISCVRegisterInfo &TRI = *STI.getRegisterInfo(); + for (unsigned i = 0; CSRegs[i]; ++i) { + unsigned CSReg = CSRegs[i]; + // Only vector registers need special care. + if (!RISCV::VRRegClass.contains(getRVVBaseRegister(TRI, CSReg))) + continue; + + SavedRegs.reset(CSReg); + + auto SubRegs = TRI.subregs(CSReg); + // Set the register and all its subregisters. + if (!MRI.def_empty(CSReg) || MRI.getUsedPhysRegsMask().test(CSReg)) { + SavedRegs.set(CSReg); + llvm::for_each(SubRegs, [&](unsigned Reg) { return SavedRegs.set(Reg); }); + } + + // Combine to super register if all of its subregisters are marked. + if (!SubRegs.empty() && llvm::all_of(SubRegs, [&](unsigned Reg) { + return SavedRegs.test(Reg); + })) + SavedRegs.set(CSReg); + } + // Unconditionally spill RA and FP only if the function uses a frame // pointer. if (hasFP(MF)) { @@ -2137,16 +2180,6 @@ static unsigned getCalleeSavedRVVNumRegs(const Register &BaseReg) { : 8; } -static MCRegister getRVVBaseRegister(const RISCVRegisterInfo &TRI, - const Register &Reg) { - MCRegister BaseReg = TRI.getSubReg(Reg, RISCV::sub_vrm1_0); - // If it's not a grouped vector register, it doesn't have subregister, so - // the base register is just itself. - if (BaseReg == RISCV::NoRegister) - BaseReg = Reg; - return BaseReg; -} - void RISCVFrameLowering::emitCalleeSavedRVVPrologCFI( MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, bool HasFP) const { MachineFunction *MF = MBB.getParent(); diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp index 4845a9c..d859db3a 100644 --- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp +++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp @@ -2319,6 +2319,10 @@ bool RISCVTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT, if (getLegalZfaFPImm(Imm, VT) >= 0) return true; + // Some constants can be produced by fli+fneg. + if (Imm.isNegative() && getLegalZfaFPImm(-Imm, VT) >= 0) + return true; + // Cannot create a 64 bit floating-point immediate value for rv32. if (Subtarget.getXLen() < VT.getScalarSizeInBits()) { // td can handle +0.0 or -0.0 already. 
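The isFPImmLegal change above extends Zfa immediate legality to negated constants: if fli can materialize |Imm|, one extra fneg recovers the sign. Below is a minimal standalone sketch of that check, using a hypothetical toy subset of the fli constant table (the real lookup is getLegalZfaFPImm over the 32-entry Zfa list; the values and names here are illustrative only):

#include <array>
#include <cmath>
#include <cstddef>
#include <cstdio>

// Toy stand-in for the Zfa fli constant table; this is an illustrative
// subset, not the actual table or encoding order.
constexpr std::array<double, 5> FLIValues = {0.25, 0.5, 1.0, 2.0, 4.0};

// Returns the table index fli would use for Val, or -1 if fli cannot
// materialize Val directly.
int getLegalFPImmIndex(double Val) {
  for (std::size_t I = 0; I < FLIValues.size(); ++I)
    if (FLIValues[I] == Val)
      return static_cast<int>(I);
  return -1;
}

// A constant is cheap if fli materializes it directly, or if fli can
// materialize its magnitude and a single fneg restores the sign.
bool isCheapFPImm(double Val) {
  if (getLegalFPImmIndex(Val) >= 0)
    return true; // fli alone
  return std::signbit(Val) && getLegalFPImmIndex(-Val) >= 0; // fli + fneg
}

int main() {
  std::printf("%d %d %d\n", isCheapFPImm(0.5), isCheapFPImm(-0.5),
              isCheapFPImm(0.3)); // prints: 1 1 0
}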
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoP.td b/llvm/lib/Target/RISCV/RISCVInstrInfoP.td index aef410f..17067220 100644 --- a/llvm/lib/Target/RISCV/RISCVInstrInfoP.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoP.td @@ -44,45 +44,48 @@ def simm10_unsigned : RISCVOp { //===----------------------------------------------------------------------===// let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in -class RVPUnaryImm10<bits<7> funct7, string opcodestr, - DAGOperand TyImm10 = simm10> - : RVInstIBase<0b010, OPC_OP_IMM_32, (outs GPR:$rd), (ins TyImm10:$imm10), - opcodestr, "$rd, $imm10"> { +class RVPLoadImm10<bits<7> funct7, string opcodestr, + DAGOperand TyImm10 = simm10> + : RVInst<(outs GPR:$rd), (ins TyImm10:$imm10), opcodestr, "$rd, $imm10", [], + InstFormatOther> { bits<10> imm10; + bits<5> rd; let Inst{31-25} = funct7; let Inst{24-16} = imm10{8-0}; let Inst{15} = imm10{9}; + let Inst{14-12} = 0b010; + let Inst{11-7} = rd; + let Inst{6-0} = OPC_OP_IMM_32.Value; } let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in -class RVPUnaryImm8<bits<8> funct8, string opcodestr> - : RVInstIBase<0b010, OPC_OP_IMM_32, (outs GPR:$rd), (ins uimm8:$uimm8), - opcodestr, "$rd, $uimm8"> { +class RVPLoadImm8<bits<8> funct8, string opcodestr> + : RVInst<(outs GPR:$rd), (ins uimm8:$uimm8), opcodestr, "$rd, $uimm8", [], + InstFormatOther> { bits<8> uimm8; + bits<5> rd; let Inst{31-24} = funct8; let Inst{23-16} = uimm8; let Inst{15} = 0b0; + let Inst{14-12} = 0b010; + let Inst{11-7} = rd; + let Inst{6-0} = OPC_OP_IMM_32.Value; } let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in class RVPUnary<bits<3> f, string opcodestr, dag operands, string argstr> : RVInstIBase<0b010, OPC_OP_IMM_32, (outs GPR:$rd), operands, opcodestr, argstr> { - bits<5> imm; - bits<5> rs1; - let Inst{31} = 0b1; let Inst{30-28} = f; let Inst{27} = 0b0; - let Inst{19-15} = rs1; } class RVPUnaryImm5<bits<3> f, string opcodestr> : RVPUnary<f, opcodestr, (ins GPR:$rs1, uimm5:$uimm5), "$rd, $rs1, $uimm5"> { bits<5> uimm5; - let imm = uimm5; let Inst{26-25} = 0b01; let Inst{24-20} = uimm5; } @@ -145,11 +148,11 @@ def PSSLAI_W : RVPUnaryImm5<0b101, "psslai.w">; } // Predicates = [HasStdExtP, IsRV64] let Predicates = [HasStdExtP] in -def PLI_H : RVPUnaryImm10<0b1011000, "pli.h">; +def PLI_H : RVPLoadImm10<0b1011000, "pli.h">; let Predicates = [HasStdExtP, IsRV64] in -def PLI_W : RVPUnaryImm10<0b1011001, "pli.w">; +def PLI_W : RVPLoadImm10<0b1011001, "pli.w">; let Predicates = [HasStdExtP] in -def PLI_B : RVPUnaryImm8<0b10110100, "pli.b">; +def PLI_B : RVPLoadImm8<0b10110100, "pli.b">; let Predicates = [HasStdExtP] in { def PSEXT_H_B : RVPUnaryWUF<0b00, 0b00100, "psext.h.b">; @@ -162,6 +165,6 @@ def PSEXT_W_H : RVPUnaryWUF<0b01, 0b00101, "psext.w.h">; } // Predicates = [HasStdExtP, IsRV64] let Predicates = [HasStdExtP] in -def PLUI_H : RVPUnaryImm10<0b1111000, "plui.h", simm10_unsigned>; +def PLUI_H : RVPLoadImm10<0b1111000, "plui.h", simm10_unsigned>; let Predicates = [HasStdExtP, IsRV64] in -def PLUI_W : RVPUnaryImm10<0b1111001, "plui.w", simm10_unsigned>; +def PLUI_W : RVPLoadImm10<0b1111001, "plui.w", simm10_unsigned>; diff --git a/llvm/lib/Target/RISCV/RISCVInterleavedAccess.cpp b/llvm/lib/Target/RISCV/RISCVInterleavedAccess.cpp index 6de870c..878401e 100644 --- a/llvm/lib/Target/RISCV/RISCVInterleavedAccess.cpp +++ b/llvm/lib/Target/RISCV/RISCVInterleavedAccess.cpp @@ -131,10 +131,14 @@ static bool getMemOperands(unsigned Factor, VectorType *VTy, Type *XLenTy, : Constant::getAllOnesValue(XLenTy); return true; } - if (auto *VPLdSt = 
dyn_cast<VPIntrinsic>(I)) { - assert((VPLdSt->getIntrinsicID() == Intrinsic::vp_load || - VPLdSt->getIntrinsicID() == Intrinsic::vp_store) && - "Unexpected intrinsic"); + + auto *II = cast<IntrinsicInst>(I); + switch (II->getIntrinsicID()) { + default: + llvm_unreachable("Unsupported intrinsic type"); + case Intrinsic::vp_load: + case Intrinsic::vp_store: { + auto *VPLdSt = cast<VPIntrinsic>(I); Ptr = VPLdSt->getMemoryPointerParam(); Alignment = VPLdSt->getPointerAlignment().value_or( DL.getABITypeAlign(VTy->getElementType())); @@ -151,21 +155,32 @@ static bool getMemOperands(unsigned Factor, VectorType *VTy, Type *XLenTy, VL = Builder.CreateZExt(Builder.CreateExactUDiv(WideEVL, FactorC), XLenTy); return true; } - auto *II = cast<IntrinsicInst>(I); - assert(II->getIntrinsicID() == Intrinsic::masked_load && - "Unexpected intrinsic"); - Ptr = II->getOperand(0); - Alignment = cast<ConstantInt>(II->getArgOperand(1))->getAlignValue(); + case Intrinsic::masked_load: { + Ptr = II->getOperand(0); + Alignment = cast<ConstantInt>(II->getArgOperand(1))->getAlignValue(); - if (!isa<UndefValue>(II->getOperand(3))) - return false; + if (!isa<UndefValue>(II->getOperand(3))) + return false; - assert(Mask && "masked.load needs a mask!"); + assert(Mask && "masked.load needs a mask!"); - VL = isa<FixedVectorType>(VTy) - ? Builder.CreateElementCount(XLenTy, VTy->getElementCount()) - : Constant::getAllOnesValue(XLenTy); - return true; + VL = isa<FixedVectorType>(VTy) + ? Builder.CreateElementCount(XLenTy, VTy->getElementCount()) + : Constant::getAllOnesValue(XLenTy); + return true; + } + case Intrinsic::masked_store: { + Ptr = II->getOperand(1); + Alignment = cast<ConstantInt>(II->getArgOperand(2))->getAlignValue(); + + assert(Mask && "masked.store needs a mask!"); + + VL = isa<FixedVectorType>(VTy) + ? Builder.CreateElementCount(XLenTy, VTy->getElementCount()) + : Constant::getAllOnesValue(XLenTy); + return true; + } + } } /// Lower an interleaved load into a vlsegN intrinsic. @@ -217,6 +232,7 @@ bool RISCVTargetLowering::lowerInterleavedLoad( Builder.CreateIntrinsic(Intrinsic::experimental_vp_strided_load, {VTy, BasePtr->getType(), Stride->getType()}, {BasePtr, Stride, Mask, VL}); + Alignment = commonAlignment(Alignment, Indices[0] * ScalarSizeInBytes); CI->addParamAttr(0, Attribute::getWithAlignment(CI->getContext(), Alignment)); Shuffles[0]->replaceAllUsesWith(CI); @@ -288,8 +304,9 @@ bool RISCVTargetLowering::lowerInterleavedStore(StoreInst *SI, Intrinsic::experimental_vp_strided_store, {Data->getType(), BasePtr->getType(), Stride->getType()}, {Data, BasePtr, Stride, Mask, VL}); + Align Alignment = commonAlignment(SI->getAlign(), Index * ScalarSizeInBytes); CI->addParamAttr( - 1, Attribute::getWithAlignment(CI->getContext(), SI->getAlign())); + 1, Attribute::getWithAlignment(CI->getContext(), Alignment)); return true; } diff --git a/llvm/lib/Target/RISCV/RISCVSubtarget.cpp b/llvm/lib/Target/RISCV/RISCVSubtarget.cpp index c754de4..e35ffaf 100644 --- a/llvm/lib/Target/RISCV/RISCVSubtarget.cpp +++ b/llvm/lib/Target/RISCV/RISCVSubtarget.cpp @@ -216,7 +216,7 @@ unsigned RISCVSubtarget::getMinimumJumpTableEntries() const { } void RISCVSubtarget::overrideSchedPolicy(MachineSchedPolicy &Policy, - unsigned NumRegionInstrs) const { + const SchedRegion &Region) const { // Do bidirectional scheduling since it provides a more balanced scheduling // leading to better performance. This will increase compile time. 
Policy.OnlyTopDown = false; @@ -231,8 +231,8 @@ void RISCVSubtarget::overrideSchedPolicy(MachineSchedPolicy &Policy, Policy.ShouldTrackPressure = true; } -void RISCVSubtarget::overridePostRASchedPolicy(MachineSchedPolicy &Policy, - unsigned NumRegionInstrs) const { +void RISCVSubtarget::overridePostRASchedPolicy( + MachineSchedPolicy &Policy, const SchedRegion &Region) const { MISched::Direction PostRASchedDirection = getPostRASchedDirection(); if (PostRASchedDirection == MISched::TopDown) { Policy.OnlyTopDown = true; diff --git a/llvm/lib/Target/RISCV/RISCVSubtarget.h b/llvm/lib/Target/RISCV/RISCVSubtarget.h index 4f560cc..fd57e02 100644 --- a/llvm/lib/Target/RISCV/RISCVSubtarget.h +++ b/llvm/lib/Target/RISCV/RISCVSubtarget.h @@ -395,11 +395,11 @@ public: } void overrideSchedPolicy(MachineSchedPolicy &Policy, - unsigned NumRegionInstrs) const override; + const SchedRegion &Region) const override; void overridePostRASchedPolicy(MachineSchedPolicy &Policy, - unsigned NumRegionInstrs) const override; + const SchedRegion &Region) const override; }; -} // End llvm namespace +} // namespace llvm #endif diff --git a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h index 12bf8c1..d62d99c 100644 --- a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h +++ b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h @@ -116,8 +116,8 @@ public: } TailFoldingStyle getPreferredTailFoldingStyle(bool IVUpdateMayOverflow) const override { - return ST->hasVInstructions() ? TailFoldingStyle::Data - : TailFoldingStyle::DataWithoutLaneMask; + return ST->hasVInstructions() ? TailFoldingStyle::DataWithEVL + : TailFoldingStyle::None; } std::optional<unsigned> getMaxVScale() const override; std::optional<unsigned> getVScaleForTuning() const override; diff --git a/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp b/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp index 15bd346..b53d919 100644 --- a/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp +++ b/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp @@ -1486,7 +1486,6 @@ bool RISCVVLOptimizer::tryReduceVL(MachineInstr &MI) const { } bool RISCVVLOptimizer::runOnMachineFunction(MachineFunction &MF) { - assert(DemandedVLs.size() == 0); if (skipFunction(MF.getFunction())) return false; @@ -1499,6 +1498,8 @@ bool RISCVVLOptimizer::runOnMachineFunction(MachineFunction &MF) { TII = ST.getInstrInfo(); + assert(DemandedVLs.empty()); + // For each instruction that defines a vector, compute what VL its // downstream users demand. for (MachineBasicBlock *MBB : post_order(&MF)) { diff --git a/llvm/lib/Target/RISCV/RISCVVectorPeephole.cpp b/llvm/lib/Target/RISCV/RISCVVectorPeephole.cpp index 84ef539..c1cc19b 100644 --- a/llvm/lib/Target/RISCV/RISCVVectorPeephole.cpp +++ b/llvm/lib/Target/RISCV/RISCVVectorPeephole.cpp @@ -434,6 +434,15 @@ bool RISCVVectorPeephole::convertSameMaskVMergeToVMv(MachineInstr &MI) { if (!isKnownSameDefs(TrueMask.getReg(), MIMask.getReg())) return false; + // Masked off lanes past TrueVL will come from False, and converting to vmv + // will lose these lanes unless MIVL <= TrueVL. 
+ // TODO: We could relax this for False == Passthru and True policy == TU + const MachineOperand &MIVL = MI.getOperand(RISCVII::getVLOpNum(MI.getDesc())); + const MachineOperand &TrueVL = + True->getOperand(RISCVII::getVLOpNum(True->getDesc())); + if (!RISCV::isVLKnownLE(MIVL, TrueVL)) + return false; + // True's passthru needs to be equivalent to False Register TruePassthruReg = True->getOperand(1).getReg(); Register FalseReg = MI.getOperand(2).getReg(); diff --git a/llvm/lib/Target/SPIRV/SPIRVPrepareFunctions.cpp b/llvm/lib/Target/SPIRV/SPIRVPrepareFunctions.cpp index 6766bd8..595424b 100644 --- a/llvm/lib/Target/SPIRV/SPIRVPrepareFunctions.cpp +++ b/llvm/lib/Target/SPIRV/SPIRVPrepareFunctions.cpp @@ -410,6 +410,7 @@ bool SPIRVPrepareFunctions::substituteIntrinsicCalls(Function *F) { II, Intrinsic::SPVIntrinsics::spv_lifetime_start, {1}); } else { II->eraseFromParent(); + Changed = true; } break; case Intrinsic::lifetime_end: @@ -418,6 +419,7 @@ bool SPIRVPrepareFunctions::substituteIntrinsicCalls(Function *F) { II, Intrinsic::SPVIntrinsics::spv_lifetime_end, {1}); } else { II->eraseFromParent(); + Changed = true; } break; case Intrinsic::ptr_annotation: diff --git a/llvm/lib/Target/Sparc/SparcISelLowering.cpp b/llvm/lib/Target/Sparc/SparcISelLowering.cpp index 9b434d8..1aa8efe 100644 --- a/llvm/lib/Target/Sparc/SparcISelLowering.cpp +++ b/llvm/lib/Target/Sparc/SparcISelLowering.cpp @@ -2201,7 +2201,7 @@ SDValue SparcTargetLowering::LowerGlobalTLSAddress(SDValue Op, SDValue Chain = DAG.getEntryNode(); SDValue InGlue; - Chain = DAG.getCALLSEQ_START(Chain, 1, 0, DL); + Chain = DAG.getCALLSEQ_START(Chain, 0, 0, DL); Chain = DAG.getCopyToReg(Chain, DL, SP::O0, Argument, InGlue); InGlue = Chain.getValue(1); SDValue Callee = DAG.getTargetExternalSymbol("__tls_get_addr", PtrVT); @@ -2219,7 +2219,7 @@ SDValue SparcTargetLowering::LowerGlobalTLSAddress(SDValue Op, InGlue}; Chain = DAG.getNode(SPISD::TLS_CALL, DL, NodeTys, Ops); InGlue = Chain.getValue(1); - Chain = DAG.getCALLSEQ_END(Chain, 1, 0, InGlue, DL); + Chain = DAG.getCALLSEQ_END(Chain, 0, 0, InGlue, DL); InGlue = Chain.getValue(1); SDValue Ret = DAG.getCopyFromReg(Chain, DL, SP::O0, PtrVT, InGlue); diff --git a/llvm/lib/Target/X86/X86AsmPrinter.h b/llvm/lib/Target/X86/X86AsmPrinter.h index efb951b..e02b556 100644 --- a/llvm/lib/Target/X86/X86AsmPrinter.h +++ b/llvm/lib/Target/X86/X86AsmPrinter.h @@ -151,6 +151,7 @@ private: MCSymbol *LazyPointer) override; void emitCallInstruction(const llvm::MCInst &MCI); + void maybeEmitNopAfterCallForWindowsEH(const MachineInstr *MI); // Emits a label to mark the next instruction as being relevant to Import Call // Optimization. diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp index 6281124..568a8c4 100644 --- a/llvm/lib/Target/X86/X86ISelLowering.cpp +++ b/llvm/lib/Target/X86/X86ISelLowering.cpp @@ -45059,6 +45059,10 @@ bool X86TargetLowering::isGuaranteedNotToBeUndefOrPoisonForTargetNode( unsigned NumElts = DemandedElts.getBitWidth(); switch (Op.getOpcode()) { + case X86ISD::GlobalBaseReg: + case X86ISD::Wrapper: + case X86ISD::WrapperRIP: + return true; case X86ISD::BLENDI: case X86ISD::PSHUFD: case X86ISD::UNPCKL: @@ -45098,27 +45102,34 @@ bool X86TargetLowering::canCreateUndefOrPoisonForTargetNode( bool PoisonOnly, bool ConsiderFlags, unsigned Depth) const { switch (Op.getOpcode()) { + // SSE vector insert/extracts use modulo indices. 
+ case X86ISD::PINSRB: + case X86ISD::PINSRW: + case X86ISD::PEXTRB: + case X86ISD::PEXTRW: + return false; // SSE vector multiplies are either inbounds or saturate. case X86ISD::VPMADDUBSW: case X86ISD::VPMADDWD: + return false; // SSE vector shifts handle out of bounds shift amounts. case X86ISD::VSHLI: case X86ISD::VSRLI: case X86ISD::VSRAI: return false; - // SSE blends. + // SSE blends. case X86ISD::BLENDI: case X86ISD::BLENDV: return false; - // SSE target shuffles. + // SSE target shuffles. case X86ISD::PSHUFD: case X86ISD::UNPCKL: case X86ISD::UNPCKH: case X86ISD::VPERMILPI: case X86ISD::VPERMV3: return false; - // SSE comparisons handle all icmp/fcmp cases. - // TODO: Add CMPM/MM with test coverage. + // SSE comparisons handle all icmp/fcmp cases. + // TODO: Add CMPM/MM with test coverage. case X86ISD::CMPP: case X86ISD::PCMPEQ: case X86ISD::PCMPGT: diff --git a/llvm/lib/Target/X86/X86MCInstLower.cpp b/llvm/lib/Target/X86/X86MCInstLower.cpp index 45d596b..481a9be 100644 --- a/llvm/lib/Target/X86/X86MCInstLower.cpp +++ b/llvm/lib/Target/X86/X86MCInstLower.cpp @@ -32,6 +32,7 @@ #include "llvm/CodeGen/MachineModuleInfoImpls.h" #include "llvm/CodeGen/MachineOperand.h" #include "llvm/CodeGen/StackMaps.h" +#include "llvm/CodeGen/WinEHFuncInfo.h" #include "llvm/IR/DataLayout.h" #include "llvm/IR/GlobalValue.h" #include "llvm/IR/Mangler.h" @@ -833,6 +834,7 @@ void X86AsmPrinter::LowerSTATEPOINT(const MachineInstr &MI, CallInst.setOpcode(CallOpcode); CallInst.addOperand(CallTargetMCOp); OutStreamer->emitInstruction(CallInst, getSubtargetInfo()); + maybeEmitNopAfterCallForWindowsEH(&MI); } // Record our statepoint node in the same section used by STACKMAP @@ -1430,21 +1432,6 @@ void X86AsmPrinter::LowerPATCHABLE_TAIL_CALL(const MachineInstr &MI, OutStreamer->emitLabel(FallthroughLabel); } -// Returns instruction preceding MBBI in MachineFunction. -// If MBBI is the first instruction of the first basic block, returns null. -static MachineBasicBlock::const_iterator -PrevCrossBBInst(MachineBasicBlock::const_iterator MBBI) { - const MachineBasicBlock *MBB = MBBI->getParent(); - while (MBBI == MBB->begin()) { - if (MBB == &MBB->getParent()->front()) - return MachineBasicBlock::const_iterator(); - MBB = MBB->getPrevNode(); - MBBI = MBB->end(); - } - --MBBI; - return MBBI; -} - static unsigned getSrcIdx(const MachineInstr* MI, unsigned SrcIdx) { if (X86II::isKMasked(MI->getDesc().TSFlags)) { // Skip mask operand. @@ -2271,6 +2258,9 @@ void X86AsmPrinter::emitInstruction(const MachineInstr *MI) { OutStreamer->AddComment("EVEX TO EVEX Compression ", false); } + // We use this to suppress NOP padding for Windows EH. + bool IsTailJump = false; + switch (MI->getOpcode()) { case TargetOpcode::DBG_VALUE: llvm_unreachable("Should be handled target independently"); @@ -2325,6 +2315,7 @@ void X86AsmPrinter::emitInstruction(const MachineInstr *MI) { // Lower this as normal, but add a comment. OutStreamer->AddComment("TAILCALL"); + IsTailJump = true; break; case X86::TAILJMPr: @@ -2340,6 +2331,7 @@ void X86AsmPrinter::emitInstruction(const MachineInstr *MI) { // Lower these as normal, but add some comments. 
OutStreamer->AddComment("TAILCALL"); + IsTailJump = true; break; case X86::TAILJMPm64_REX: @@ -2349,6 +2341,7 @@ } OutStreamer->AddComment("TAILCALL"); + IsTailJump = true; break; case X86::TAILJMPr64_REX: { @@ -2361,6 +2354,7 @@ } OutStreamer->AddComment("TAILCALL"); + IsTailJump = true; break; } @@ -2537,26 +2531,6 @@ case X86::SEH_BeginEpilogue: { assert(MF->hasWinCFI() && "SEH_ instruction in function without WinCFI?"); - // Windows unwinder will not invoke function's exception handler if IP is - // either in prologue or in epilogue. This behavior causes a problem when a - // call immediately precedes an epilogue, because the return address points - // into the epilogue. To cope with that, we insert a 'nop' if it ends up - // immediately after a CALL in the final emitted code. - MachineBasicBlock::const_iterator MBBI(MI); - // Check if preceded by a call and emit nop if so. - for (MBBI = PrevCrossBBInst(MBBI); - MBBI != MachineBasicBlock::const_iterator(); - MBBI = PrevCrossBBInst(MBBI)) { - // Pseudo instructions that aren't a call are assumed to not emit any - // code. If they do, we worst case generate unnecessary noops after a - // call. - if (MBBI->isCall() || !MBBI->isPseudo()) { - if (MBBI->isCall()) - EmitAndCountInstruction(MCInstBuilder(X86::NOOP)); - break; - } - } - EmitSEHInstruction(MI); return; } @@ -2585,6 +2559,7 @@ EmitAndCountInstruction(MCInstBuilder(X86::REX64_PREFIX)); emitCallInstruction(TmpInst); emitNop(*OutStreamer, 5, Subtarget); + maybeEmitNopAfterCallForWindowsEH(MI); return; } @@ -2605,6 +2580,7 @@ // For Import Call Optimization to work, we need a 3-byte nop after the // call instruction. emitNop(*OutStreamer, 3, Subtarget); + maybeEmitNopAfterCallForWindowsEH(MI); return; } break; @@ -2638,6 +2614,10 @@ if (MI->isCall()) { emitCallInstruction(TmpInst); + // Since tail calls transfer control without leaving a stack frame, there is + // never a need for NOP padding after tail calls. + if (!IsTailJump) + maybeEmitNopAfterCallForWindowsEH(MI); return; } @@ -2659,6 +2639,164 @@ void X86AsmPrinter::emitCallInstruction(const llvm::MCInst &MCI) { OutStreamer->emitInstruction(MCI, getSubtargetInfo()); } +// Determines whether a NOP is required after a CALL, so that Windows EH +// IP2State tables have the correct information. +// +// On most Windows platforms (AMD64, ARM64, ARM32, IA64, but *not* x86-32), +// exception handling works by looking up instruction pointers in lookup +// tables. These lookup tables are stored in .xdata sections in executables. +// One element of the lookup tables is the "IP2State" table (Instruction +// Pointer to State). +// +// If a function has any instructions that require cleanup during exception +// unwinding, then it will have an IP2State table. Each entry in the IP2State +// table describes a range of bytes in the function's instruction stream, and +// associates an "EH state number" with that range of instructions. A value of +// -1 means "the null state", which does not require any code to execute. +// A value other than -1 is an index into the State table. 
+// +// The entries in the IP2State table contain byte offsets within the instruction +// stream of the function. The Windows ABI requires that these offsets are +// aligned to instruction boundaries; they are not permitted to point to a byte +// that is not the first byte of an instruction. +// +// Unfortunately, CALL instructions present a problem during unwinding. CALL +// instructions push the address of the instruction after the CALL instruction, +// so that execution can resume after the CALL. If the CALL is the last +// instruction within an IP2State region, then the return address (on the stack) +// points to the *next* IP2State region. This means that the unwinder will +// use the wrong cleanup funclet during unwinding. +// +// To fix this problem, the Windows AMD64 ABI requires that CALL instructions +// are never placed at the end of an IP2State region. Stated equivalently, the +// end of a CALL instruction cannot be aligned to an IP2State boundary. If a +// CALL instruction would occur at the end of an IP2State region, then the +// compiler must insert a NOP instruction after the CALL. The NOP instruction +// is placed in the same EH region as the CALL instruction, so that the return +// address points to the NOP and the unwinder will locate the correct region. +// +// NOP padding is only necessary on Windows AMD64 targets. On ARM64 and ARM32, +// instructions have a fixed size so the unwinder knows how to "back up" by +// one instruction. +// +// Interaction with Import Call Optimization (ICO): +// +// Import Call Optimization (ICO) is a compiler + OS feature on Windows which +// improves the performance and security of DLL imports. ICO relies on using a +// specific CALL idiom that can be replaced by the OS DLL loader. This removes +// a load and indirect CALL and replaces it with a single direct CALL. +// +// To achieve this, ICO also inserts NOPs after the CALL instruction. If the +// end of the CALL is aligned with an EH state transition, we *also* insert +// a single-byte NOP. **Both forms of NOPs must be preserved.** They cannot +// be combined into a single larger NOP; nor can the second NOP be removed. +// +// This is necessary because, if ICO is active and the call site is modified +// by the loader, the loader will end up overwriting the NOPs that were inserted +// for ICO. That means that those NOPs cannot be used for the correct +// termination of the exception handling region (the IP2State transition), +// so we still need an additional NOP instruction. The NOPs cannot be combined +// into a longer NOP (which is ordinarily desirable) because then ICO would +// split one instruction, producing a malformed instruction after the ICO call. +void X86AsmPrinter::maybeEmitNopAfterCallForWindowsEH(const MachineInstr *MI) { + // We only need to insert NOPs after CALLs when targeting Windows on AMD64. + // (Don't let the name fool you: Itanium refers to table-based exception + // handling, not the Itanium architecture.) + if (MAI->getExceptionHandlingType() != ExceptionHandling::WinEH || + MAI->getWinEHEncodingType() != WinEH::EncodingType::Itanium) { + return; + } + + bool HasEHPersonality = MF->getWinEHFuncInfo() != nullptr; + + // Set up MBB iterator, initially positioned on the same MBB as MI. + MachineFunction::const_iterator MFI(MI->getParent()); + MachineFunction::const_iterator MFE(MF->end()); + + // Set up instruction iterator, positioned immediately *after* MI. 
+ MachineBasicBlock::const_iterator MBBI(MI); + MachineBasicBlock::const_iterator MBBE = MI->getParent()->end(); + ++MBBI; // Step over MI + + // This loop iterates over MBBs + for (;;) { + // This loop iterates over instructions + for (; MBBI != MBBE; ++MBBI) { + // Check the instruction that follows this CALL. + const MachineInstr &NextMI = *MBBI; + + // If there is an EH_LABEL after this CALL, then there is an EH state + // transition after this CALL. This is exactly the situation which + // requires NOP padding. + if (NextMI.isEHLabel()) { + if (HasEHPersonality) { + EmitAndCountInstruction(MCInstBuilder(X86::NOOP)); + return; + } + // We actually want to continue, in case there is an SEH_BeginEpilogue + // instruction after the EH_LABEL. In some situations, IR is produced + // that contains EH_LABEL pseudo-instructions, even when we are not + // generating IP2State tables. We still need to insert a NOP before + // SEH_BeginEpilogue in that case. + continue; + } + + // Somewhat similarly, if the CALL is the last instruction before the + // SEH epilogue, then we also need a NOP. This is necessary because the + // Windows stack unwinder will not invoke a function's exception handler + // if the instruction pointer is in the function prologue or epilogue. + // + // We always emit a NOP before SEH_BeginEpilogue, even if there is no + // personality function (unwind info) for this frame. This is the same + // behavior as MSVC. + if (NextMI.getOpcode() == X86::SEH_BeginEpilogue) { + EmitAndCountInstruction(MCInstBuilder(X86::NOOP)); + return; + } + + if (!NextMI.isPseudo() && !NextMI.isMetaInstruction()) { + // We found a real instruction. During the CALL, the return IP will + // point to this instruction. Since this instruction has the same EH + // state as the call itself (because there is no intervening EH_LABEL), + // the IP2State table will be accurate; there is no need to insert a + // NOP. + return; + } + + // The next instruction is a pseudo-op. Ignore it and keep searching. + // Because these instructions do not generate any machine code, they + // cannot prevent the IP2State table from pointing at the wrong + // instruction during a CALL. + } + + // We've reached the end of this MBB. Find the next MBB in program order. + // MBB order should be finalized by this point, so falling across MBBs is + // expected. + ++MFI; + if (MFI == MFE) { + // No more blocks; we've reached the end of the function. This should + // only happen with no-return functions, but double-check to be sure. + if (HasEHPersonality) { + // If the CALL's block has no successors, the callee is a noreturn + // function. Insert an INT3 instead of a NOP. This accomplishes the same + // purpose, but is clearer to read. Also, analysis tools will understand + // that they should not continue disassembling after the CALL (unless + // there are other branches to that label). + if (MI->getParent()->succ_empty()) + EmitAndCountInstruction(MCInstBuilder(X86::INT3)); + else + EmitAndCountInstruction(MCInstBuilder(X86::NOOP)); + } + return; + } + + // Set up iterator to scan the next basic block. 
+ const MachineBasicBlock *NextMBB = &*MFI; + MBBI = NextMBB->instr_begin(); + MBBE = NextMBB->instr_end(); + } +} + void X86AsmPrinter::emitLabelAndRecordForImportCallOptimization( ImportCallKind Kind) { assert(EnableImportCallOptimization); diff --git a/llvm/lib/Transforms/AggressiveInstCombine/AggressiveInstCombine.cpp b/llvm/lib/Transforms/AggressiveInstCombine/AggressiveInstCombine.cpp index a3a0e31..7fa6e6c5 100644 --- a/llvm/lib/Transforms/AggressiveInstCombine/AggressiveInstCombine.cpp +++ b/llvm/lib/Transforms/AggressiveInstCombine/AggressiveInstCombine.cpp @@ -886,35 +886,20 @@ static std::optional<PartStore> matchPartStore(Instruction &I, return {{PtrBase, PtrOffset, Val, ValOffset, ValWidth, Store}}; } -static bool mergePartStores(SmallVectorImpl<PartStore> &Parts, - const DataLayout &DL, TargetTransformInfo &TTI) { +static bool mergeConsecutivePartStores(ArrayRef<PartStore> Parts, + unsigned Width, const DataLayout &DL, + TargetTransformInfo &TTI) { if (Parts.size() < 2) return false; - // We now have multiple parts of the same value stored to the same pointer. - // Sort the parts by pointer offset, and make sure they are consistent with - // the value offsets. Also check that the value is fully covered without - // overlaps. - // FIXME: We could support merging stores for only part of the value here. - llvm::sort(Parts); - int64_t LastEndOffsetFromFirst = 0; - const PartStore &First = Parts[0]; - for (const PartStore &Part : Parts) { - APInt PtrOffsetFromFirst = Part.PtrOffset - First.PtrOffset; - int64_t ValOffsetFromFirst = Part.ValOffset - First.ValOffset; - if (PtrOffsetFromFirst * 8 != ValOffsetFromFirst || - LastEndOffsetFromFirst != ValOffsetFromFirst) - return false; - LastEndOffsetFromFirst = ValOffsetFromFirst + Part.ValWidth; - } - // Check whether combining the stores is profitable. // FIXME: We could generate smaller stores if we can't produce a large one. + const PartStore &First = Parts.front(); LLVMContext &Ctx = First.Store->getContext(); - Type *NewTy = Type::getIntNTy(Ctx, LastEndOffsetFromFirst); + Type *NewTy = Type::getIntNTy(Ctx, Width); unsigned Fast = 0; if (!TTI.isTypeLegal(NewTy) || - !TTI.allowsMisalignedMemoryAccesses(Ctx, LastEndOffsetFromFirst, + !TTI.allowsMisalignedMemoryAccesses(Ctx, Width, First.Store->getPointerAddressSpace(), First.Store->getAlign(), &Fast) || !Fast) @@ -941,6 +926,39 @@ static bool mergePartStores(SmallVectorImpl<PartStore> &Parts, return true; } +static bool mergePartStores(SmallVectorImpl<PartStore> &Parts, + const DataLayout &DL, TargetTransformInfo &TTI) { + if (Parts.size() < 2) + return false; + + // We now have multiple parts of the same value stored to the same pointer. + // Sort the parts by pointer offset, and make sure they are consistent with + // the value offsets. Also check that the value is fully covered without + // overlaps. 
+ bool Changed = false; + llvm::sort(Parts); + int64_t LastEndOffsetFromFirst = 0; + const PartStore *First = &Parts[0]; + for (const PartStore &Part : Parts) { + APInt PtrOffsetFromFirst = Part.PtrOffset - First->PtrOffset; + int64_t ValOffsetFromFirst = Part.ValOffset - First->ValOffset; + if (PtrOffsetFromFirst * 8 != ValOffsetFromFirst || + LastEndOffsetFromFirst != ValOffsetFromFirst) { + Changed |= mergeConsecutivePartStores(ArrayRef(First, &Part), + LastEndOffsetFromFirst, DL, TTI); + First = &Part; + LastEndOffsetFromFirst = Part.ValWidth; + continue; + } + + LastEndOffsetFromFirst = ValOffsetFromFirst + Part.ValWidth; + } + + Changed |= mergeConsecutivePartStores(ArrayRef(First, Parts.end()), + LastEndOffsetFromFirst, DL, TTI); + return Changed; +} + static bool foldConsecutiveStores(BasicBlock &BB, const DataLayout &DL, TargetTransformInfo &TTI, AliasAnalysis &AA) { // FIXME: Add big endian support. diff --git a/llvm/lib/Transforms/IPO/MemProfContextDisambiguation.cpp b/llvm/lib/Transforms/IPO/MemProfContextDisambiguation.cpp index 469f435..b803c97 100644 --- a/llvm/lib/Transforms/IPO/MemProfContextDisambiguation.cpp +++ b/llvm/lib/Transforms/IPO/MemProfContextDisambiguation.cpp @@ -3998,6 +3998,24 @@ void IndexCallsiteContextGraph::updateCall(CallInfo &CallerCall, CI->Clones[CallerCall.cloneNo()] = CalleeFunc.cloneNo(); } +// Update the debug information attached to NewFunc to use the clone Name. Note +// this needs to be done for both any existing DISubprogram for the definition, +// as well as any separate declaration DISubprogram. +static void updateSubprogramLinkageName(Function *NewFunc, StringRef Name) { + assert(Name == NewFunc->getName()); + auto *SP = NewFunc->getSubprogram(); + if (!SP) + return; + auto *MDName = MDString::get(NewFunc->getParent()->getContext(), Name); + SP->replaceLinkageName(MDName); + DISubprogram *Decl = SP->getDeclaration(); + if (!Decl) + return; + TempDISubprogram NewDecl = Decl->clone(); + NewDecl->replaceLinkageName(MDName); + SP->replaceDeclaration(MDNode::replaceWithUniqued(std::move(NewDecl))); +} + CallsiteContextGraph<ModuleCallsiteContextGraph, Function, Instruction *>::FuncInfo ModuleCallsiteContextGraph::cloneFunctionForCallsite( @@ -4009,9 +4027,7 @@ ModuleCallsiteContextGraph::cloneFunctionForCallsite( std::string Name = getMemProfFuncName(Func.func()->getName(), CloneNo); assert(!Func.func()->getParent()->getFunction(Name)); NewFunc->setName(Name); - if (auto *SP = NewFunc->getSubprogram()) - SP->replaceLinkageName( - MDString::get(NewFunc->getParent()->getContext(), Name)); + updateSubprogramLinkageName(NewFunc, Name); for (auto &Inst : CallsWithMetadataInFunc) { // This map always has the initial version in it. assert(Inst.cloneNo() == 0); @@ -4950,9 +4966,7 @@ static SmallVector<std::unique_ptr<ValueToValueMapTy>, 4> createFunctionClones( PrevF->eraseFromParent(); } else NewF->setName(Name); - if (auto *SP = NewF->getSubprogram()) - SP->replaceLinkageName( - MDString::get(NewF->getParent()->getContext(), Name)); + updateSubprogramLinkageName(NewF, Name); ORE.emit(OptimizationRemark(DEBUG_TYPE, "MemprofClone", &F) << "created clone " << ore::NV("NewFunction", NewF)); diff --git a/llvm/lib/Transforms/Scalar/LoopSimplifyCFG.cpp b/llvm/lib/Transforms/Scalar/LoopSimplifyCFG.cpp index 221094f..b9546c5 100644 --- a/llvm/lib/Transforms/Scalar/LoopSimplifyCFG.cpp +++ b/llvm/lib/Transforms/Scalar/LoopSimplifyCFG.cpp @@ -128,6 +128,8 @@ private: // from any other block. 
So this variable set to true means that loop's latch // has become unreachable from loop header. bool DeleteCurrentLoop = false; + // Whether or not we enter the loop through an indirectbr. + bool HasIndirectEntry = false; // The blocks of the original loop that will still be reachable from entry // after the constant folding. @@ -216,6 +218,19 @@ private: return; } + // We need a loop preheader to split in handleDeadExits(). If LoopSimplify + // wasn't able to form one because the loop can be entered through an + // indirectbr we cannot continue. + if (!L.getLoopPreheader()) { + assert(any_of(predecessors(L.getHeader()), + [&](BasicBlock *Pred) { + return isa<IndirectBrInst>(Pred->getTerminator()); + }) && + "Loop should have preheader if it is not entered indirectly"); + HasIndirectEntry = true; + return; + } + // Collect live and dead loop blocks and exits. LiveLoopBlocks.insert(L.getHeader()); for (auto I = DFS.beginRPO(), E = DFS.endRPO(); I != E; ++I) { @@ -546,6 +561,12 @@ public: return false; } + if (HasIndirectEntry) { + LLVM_DEBUG(dbgs() << "Loops which can be entered indirectly are not" + " supported!\n"); + return false; + } + // Nothing to constant-fold. if (FoldCandidates.empty()) { LLVM_DEBUG( diff --git a/llvm/lib/Transforms/Scalar/Scalarizer.cpp b/llvm/lib/Transforms/Scalar/Scalarizer.cpp index 820c8e1..ced61cb 100644 --- a/llvm/lib/Transforms/Scalar/Scalarizer.cpp +++ b/llvm/lib/Transforms/Scalar/Scalarizer.cpp @@ -1105,7 +1105,9 @@ bool ScalarizerVisitor::visitExtractValueInst(ExtractValueInst &EVI) { Res.push_back(ResElem); } - gather(&EVI, Res, *VS); + Type *ActualVecType = cast<FixedVectorType>(OpTy->getContainedType(Index)); + std::optional<VectorSplit> AVS = getVectorSplit(ActualVecType); + gather(&EVI, Res, *AVS); return true; } diff --git a/llvm/lib/Transforms/Utils/Local.cpp b/llvm/lib/Transforms/Utils/Local.cpp index 1e3ef68..f89d36f 100644 --- a/llvm/lib/Transforms/Utils/Local.cpp +++ b/llvm/lib/Transforms/Utils/Local.cpp @@ -3857,6 +3857,10 @@ bool llvm::canReplaceOperandWithVariable(const Instruction *I, unsigned OpIdx) { if (Op->isSwiftError()) return false; + // Cannot replace alloca argument with phi/select. + if (I->isLifetimeStartOrEnd()) + return false; + // Early exit. if (!isa<Constant, InlineAsm>(Op)) return true; diff --git a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp b/llvm/lib/Transforms/Utils/SimplifyCFG.cpp index 75c9650..94b0ab8 100644 --- a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp +++ b/llvm/lib/Transforms/Utils/SimplifyCFG.cpp @@ -2227,16 +2227,6 @@ static bool canSinkInstructions( return I->getOperand(OI) == I0->getOperand(OI); }; if (!all_of(Insts, SameAsI0)) { - // SROA can't speculate lifetime markers of selects/phis, and the - // backend may handle such lifetimes incorrectly as well (#104776). - // Don't sink lifetimes if it would introduce a phi on the pointer - // argument. - if (isa<LifetimeIntrinsic>(I0) && OI == 1 && - any_of(Insts, [](const Instruction *I) { - return isa<AllocaInst>(I->getOperand(1)->stripPointerCasts()); - })) - return false; - if ((isa<Constant>(Op) && !replacingOperandWithVariableIsCheap(I0, OI)) || !canReplaceOperandWithVariable(I0, OI)) // We can't create a PHI from this GEP. 
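The Local.cpp and SimplifyCFG.cpp changes above centralize the lifetime-marker rule: canReplaceOperandWithVariable now refuses lifetime intrinsics outright, so the sinking code no longer needs its own alloca check. Below is a simplified model of that decision, with hypothetical stand-in types in place of llvm::Instruction; a sketch of the idea, not the actual implementation:

#include <cstddef>
#include <vector>

// Hypothetical stand-in for an instruction with operand ids.
struct Inst {
  bool IsLifetimeMarker = false;
  std::vector<int> Ops;
};

// Mirrors the new rule: a lifetime marker's pointer operand must remain a
// direct alloca, so it may never be replaced by a phi/select.
bool canReplaceOperandWithVariable(const Inst &I) {
  return !I.IsLifetimeMarker;
}

// Sketch of the sinking check: instructions from different predecessors can
// be sunk into a common successor only if every operand either matches
// across all of them or is allowed to become a phi.
bool canSinkInstructions(const std::vector<Inst> &Insts) {
  const Inst &I0 = Insts.front();
  for (std::size_t OI = 0; OI < I0.Ops.size(); ++OI) {
    bool AllSame = true;
    for (const Inst &I : Insts)
      AllSame &= I.Ops[OI] == I0.Ops[OI];
    // Differing operands would require introducing a phi for this operand.
    if (!AllSame && !canReplaceOperandWithVariable(I0))
      return false;
  }
  return true;
}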
diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp index 3ce9d29..46bc26c 100644 --- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp +++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp @@ -1354,9 +1354,10 @@ public: ChosenTailFoldingStyle = {ForceTailFoldingStyle.getValue(), ForceTailFoldingStyle.getValue()}; - if (ForceTailFoldingStyle != TailFoldingStyle::DataWithEVL) + if (ChosenTailFoldingStyle->first != TailFoldingStyle::DataWithEVL && + ChosenTailFoldingStyle->second != TailFoldingStyle::DataWithEVL) return; - // Override forced styles if needed. + // Override EVL styles if needed. // FIXME: Investigate opportunity for fixed vector factor. bool EVLIsLegal = UserIC <= 1 && IsScalableVF && TTI.hasActiveVectorLength() && !EnableVPlanNativePath; @@ -4479,6 +4480,28 @@ VectorizationFactor LoopVectorizationPlanner::selectEpilogueVectorizationFactor( Type *TCType = Legal->getWidestInductionType(); const SCEV *RemainingIterations = nullptr; unsigned MaxTripCount = 0; + if (MainLoopVF.isFixed()) { + // TODO: extend to support scalable VFs. + const SCEV *TC = vputils::getSCEVExprForVPValue( + getPlanFor(MainLoopVF).getTripCount(), SE); + assert(!isa<SCEVCouldNotCompute>(TC) && + "Trip count SCEV must be computable"); + RemainingIterations = SE.getURemExpr( + TC, SE.getConstant(TCType, MainLoopVF.getFixedValue() * IC)); + + // No iterations left to process in the epilogue. + if (RemainingIterations->isZero()) + return Result; + + MaxTripCount = MainLoopVF.getFixedValue() * IC - 1; + if (SE.isKnownPredicate(CmpInst::ICMP_ULT, RemainingIterations, + SE.getConstant(TCType, MaxTripCount))) { + MaxTripCount = SE.getUnsignedRangeMax(RemainingIterations).getZExtValue(); + } + LLVM_DEBUG(dbgs() << "LEV: Maximum Trip Count for Epilogue: " + << MaxTripCount << "\n"); + } + for (auto &NextVF : ProfitableVFs) { // Skip candidate VFs without a corresponding VPlan. if (!hasPlanWithVF(NextVF.Width)) @@ -4496,24 +4519,7 @@ VectorizationFactor LoopVectorizationPlanner::selectEpilogueVectorizationFactor( // If NextVF is greater than the number of remaining iterations, the // epilogue loop would be dead. Skip such factors. - if (!MainLoopVF.isScalable() && !NextVF.Width.isScalable()) { - // TODO: extend to support scalable VFs. 
- if (!RemainingIterations) { - const SCEV *TC = vputils::getSCEVExprForVPValue( - getPlanFor(NextVF.Width).getTripCount(), SE); - assert(!isa<SCEVCouldNotCompute>(TC) && - "Trip count SCEV must be computable"); - RemainingIterations = SE.getURemExpr( - TC, SE.getConstant(TCType, MainLoopVF.getFixedValue() * IC)); - MaxTripCount = MainLoopVF.getFixedValue() * IC - 1; - if (SE.isKnownPredicate(CmpInst::ICMP_ULT, RemainingIterations, - SE.getConstant(TCType, MaxTripCount))) { - MaxTripCount = - SE.getUnsignedRangeMax(RemainingIterations).getZExtValue(); - } - LLVM_DEBUG(dbgs() << "LEV: Maximum Trip Count for Epilogue: " - << MaxTripCount << "\n"); - } + if (RemainingIterations && !NextVF.Width.isScalable()) { if (SE.isKnownPredicate( CmpInst::ICMP_UGT, SE.getConstant(TCType, NextVF.Width.getFixedValue()), diff --git a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp index 5296364..0d0b342 100644 --- a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp +++ b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp @@ -206,12 +206,6 @@ static cl::opt<bool> VectorizeNonPowerOf2( "slp-vectorize-non-power-of-2", cl::init(false), cl::Hidden, cl::desc("Try to vectorize with non-power-of-2 number of elements.")); -/// Enables vectorization of copyable elements. -static cl::opt<bool> VectorizeCopyableElements( - "slp-copyable-elements", cl::init(true), cl::Hidden, - cl::desc("Try to replace values with the idempotent instructions for " - "better vectorization.")); - // Limit the number of alias checks. The limit is chosen so that // it has no negative effect on the llvm benchmarks. static const unsigned AliasedCheckLimit = 10; @@ -861,13 +855,6 @@ static std::optional<unsigned> getExtractIndex(const Instruction *E) { return *EI->idx_begin(); } -namespace llvm { -/// Checks if the specified value does not require scheduling. It does not -/// require scheduling if all operands and all users do not need to be scheduled -/// in the current basic block. -static bool doesNotNeedToBeScheduled(Value *V); -} // namespace llvm - namespace { /// \returns true if \p Opcode is allowed as part of the main/alternate /// instruction for SLP vectorization. @@ -970,33 +957,6 @@ class BinOpSameOpcodeHelper { return Instruction::Xor; llvm_unreachable("Cannot find interchangeable instruction."); } - - /// Return true if the instruction can be converted to \p Opcode. - bool hasCandidateOpcode(unsigned Opcode) const { - MaskType Candidate = Mask & SeenBefore; - switch (Opcode) { - case Instruction::Shl: - return Candidate & ShlBIT; - case Instruction::AShr: - return Candidate & AShrBIT; - case Instruction::Mul: - return Candidate & MulBIT; - case Instruction::Add: - return Candidate & AddBIT; - case Instruction::Sub: - return Candidate & SubBIT; - case Instruction::And: - return Candidate & AndBIT; - case Instruction::Or: - return Candidate & OrBIT; - case Instruction::Xor: - return Candidate & XorBIT; - default: - break; - } - llvm_unreachable("Cannot find interchangeable instruction."); - } - SmallVector<Value *> getOperand(const Instruction *To) const { unsigned ToOpcode = To->getOpcode(); unsigned FromOpcode = I->getOpcode(); @@ -1157,10 +1117,6 @@ public: AltOp.trySet(OpcodeInMaskForm, InterchangeableMask)); } unsigned getMainOpcode() const { return MainOp.getOpcode(); } - /// Checks if the list of potential opcodes includes \p Opcode. 
- bool hasCandidateOpcode(unsigned Opcode) const { - return MainOp.hasCandidateOpcode(Opcode); - } bool hasAltOp() const { return AltOp.I; } unsigned getAltOpcode() const { return hasAltOp() ? AltOp.getOpcode() : getMainOpcode(); @@ -1196,8 +1152,6 @@ class InstructionsState { /// GetVectorCost. Instruction *MainOp = nullptr; Instruction *AltOp = nullptr; - /// Wether the instruction state represents copyable instructions. - bool HasCopyables = false; public: Instruction *getMainOp() const { @@ -1236,11 +1190,9 @@ public: if (!I->isBinaryOp()) return nullptr; BinOpSameOpcodeHelper Converter(MainOp); - if (!Converter.add(I) || !Converter.add(MainOp)) - return nullptr; - if (Converter.hasAltOp() && !isAltShuffle()) - return nullptr; - return Converter.hasAltOp() ? AltOp : MainOp; + if (Converter.add(I) && Converter.add(MainOp) && !Converter.hasAltOp()) + return MainOp; + return AltOp; } /// Checks if main/alt instructions are shift operations. @@ -1285,63 +1237,9 @@ public: explicit operator bool() const { return valid(); } InstructionsState() = delete; - InstructionsState(Instruction *MainOp, Instruction *AltOp, - bool HasCopyables = false) - : MainOp(MainOp), AltOp(AltOp), HasCopyables(HasCopyables) {} + InstructionsState(Instruction *MainOp, Instruction *AltOp) + : MainOp(MainOp), AltOp(AltOp) {} static InstructionsState invalid() { return {nullptr, nullptr}; } - - bool isCopyableElement(Value *V) const { - assert(valid() && "InstructionsState is invalid."); - if (!HasCopyables) - return false; - if (isAltShuffle() || getOpcode() == Instruction::GetElementPtr) - return false; - auto *I = dyn_cast<Instruction>(V); - if (!I) - return !isa<PoisonValue>(V); - if (I->getParent() != MainOp->getParent() && - (!isVectorLikeInstWithConstOps(I) || - !isVectorLikeInstWithConstOps(MainOp))) - return true; - if (I->getOpcode() == MainOp->getOpcode()) - return false; - if (!I->isBinaryOp()) - return true; - BinOpSameOpcodeHelper Converter(MainOp); - return !Converter.add(I) || !Converter.add(MainOp) || - Converter.hasAltOp() || !Converter.hasCandidateOpcode(getOpcode()); - } - - /// Checks if the value is non-schedulable. - bool isNonSchedulable(Value *V) const { - assert(valid() && "InstructionsState is invalid."); - auto *I = dyn_cast<Instruction>(V); - if (!HasCopyables) - return !I || isa<PHINode>(I) || isVectorLikeInstWithConstOps(I) || - doesNotNeedToBeScheduled(V); - // MainOp for copyables always schedulable to correctly identify - // non-schedulable copyables. - if (isCopyableElement(V)) { - auto IsNonSchedulableCopyableElement = [this](Value *V) { - auto *I = dyn_cast<Instruction>(V); - return !I || isa<PHINode>(I) || I->getParent() != MainOp->getParent() || - (doesNotNeedToBeScheduled(I) && - // If the copyable instructions comes after MainOp - // (non-schedulable, but used in the block) - cannot vectorize - // it, will possibly generate use before def. 
- (isVectorLikeInstWithConstOps(I) || !MainOp->comesBefore(I))); - }; - - return IsNonSchedulableCopyableElement(V); - } - return !I || isa<PHINode>(I) || isVectorLikeInstWithConstOps(I) || - doesNotNeedToBeScheduled(V); - } - - bool areInstructionsWithCopyableElements() const { - assert(valid() && "InstructionsState is invalid."); - return HasCopyables; - } }; std::pair<Instruction *, SmallVector<Value *>> @@ -3001,6 +2899,9 @@ public: for (OperandDataVec &Ops : OpsVec) Ops.resize(NumLanes); for (unsigned Lane : seq<unsigned>(NumLanes)) { + Value *V = VL[Lane]; + assert((isa<Instruction>(V) || isa<PoisonValue>(V)) && + "Expected instruction or poison value"); // Our tree has just 3 nodes: the root and two operands. // It is therefore trivial to get the APO. We only need to check the // opcode of V and whether the operand at OpIdx is the LHS or RHS @@ -3011,24 +2912,17 @@ public: // Since operand reordering is performed on groups of commutative // operations or alternating sequences (e.g., +, -), we can safely tell // the inverse operations by checking commutativity. - auto *I = dyn_cast<Instruction>(VL[Lane]); - if (!I && isa<PoisonValue>(VL[Lane])) { + if (isa<PoisonValue>(V)) { for (unsigned OpIdx : seq<unsigned>(NumOperands)) OpsVec[OpIdx][Lane] = {Operands[OpIdx][Lane], true, false}; continue; } - bool IsInverseOperation = false; - if (S.isCopyableElement(VL[Lane])) { - // The value is a copyable element. - IsInverseOperation = !isCommutative(MainOp); - } else { - assert(I && "Expected instruction"); - auto [SelectedOp, Ops] = convertTo(I, S); - // We cannot check commutativity by the converted instruction - // (SelectedOp) because isCommutative also examines def-use - // relationships. - IsInverseOperation = !isCommutative(SelectedOp, I); - } + auto [SelectedOp, Ops] = convertTo(cast<Instruction>(V), S); + // We cannot check commutativity by the converted instruction + // (SelectedOp) because isCommutative also examines def-use + // relationships. + bool IsInverseOperation = + !isCommutative(SelectedOp, cast<Instruction>(V)); for (unsigned OpIdx : seq<unsigned>(ArgSize)) { bool APO = (OpIdx == 0) ? false : IsInverseOperation; OpsVec[OpIdx][Lane] = {Operands[OpIdx][Lane], APO, false}; @@ -3898,9 +3792,6 @@ private: /// reordering of operands during buildTreeRec() and vectorizeTree(). SmallVector<ValueList, 2> Operands; - /// Copyable elements of the entry node. - SmallPtrSet<const Value *, 4> CopyableElements; - /// MainOp and AltOp are recorded inside. S should be obtained from /// newTreeEntry. InstructionsState S = InstructionsState::invalid(); @@ -3929,7 +3820,11 @@ private: void setInterleave(unsigned Factor) { InterleaveFactor = Factor; } /// Marks the node as one that does not require scheduling. - void setDoesNotNeedToSchedule() { DoesNotNeedToSchedule = true; } + void setDoesNotNeedToSchedule() { + assert(::doesNotNeedToSchedule(Scalars) && + "Expected to not need scheduling"); + DoesNotNeedToSchedule = true; + } /// Returns true if the node is marked as one that does not require /// scheduling. bool doesNotNeedToSchedule() const { return DoesNotNeedToSchedule; } @@ -4001,20 +3896,6 @@ private: bool hasState() const { return S.valid(); } - /// Add \p V to the list of copyable elements. - void addCopyableElement(Value *V) { - assert(S.isCopyableElement(V) && "Not a copyable element."); - CopyableElements.insert(V); - } - - /// Returns true if \p V is a copyable element. 
- bool isCopyableElement(Value *V) const { - return CopyableElements.contains(V); - } - - /// Returns true if any scalar in the list is a copyable element. - bool hasCopyableElements() const { return !CopyableElements.empty(); } - /// When ReuseReorderShuffleIndices is empty it just returns position of \p /// V within vector of Scalars. Otherwise, try to remap on its reuse index. unsigned findLaneForValue(Value *V) const { @@ -4087,8 +3968,6 @@ private: for (Value *V : Scalars) dbgs().indent(2) << *V << "\n"; dbgs() << "State: "; - if (S && hasCopyableElements()) - dbgs() << "[[Copyable]] "; switch (State) { case Vectorize: if (InterleaveFactor > 0) { @@ -4266,20 +4145,12 @@ private: } } } else if (!Last->isGather()) { - if (isa<PHINode>(S.getMainOp()) || - isVectorLikeInstWithConstOps(S.getMainOp()) || - (!S.areInstructionsWithCopyableElements() && - doesNotNeedToSchedule(VL)) || - all_of(VL, [&](Value *V) { return S.isNonSchedulable(V); })) + if (doesNotNeedToSchedule(VL)) Last->setDoesNotNeedToSchedule(); SmallPtrSet<Value *, 4> Processed; for (Value *V : VL) { if (isa<PoisonValue>(V)) continue; - if (S.isCopyableElement(V)) { - Last->addCopyableElement(V); - continue; - } auto It = ScalarToTreeEntries.find(V); if (It == ScalarToTreeEntries.end()) { ScalarToTreeEntries.try_emplace(V).first->getSecond().push_back(Last); @@ -4291,14 +4162,16 @@ private: } } // Update the scheduler bundle to point to this TreeEntry. - assert((!Bundle.getBundle().empty() || Last->doesNotNeedToSchedule()) && + assert((!Bundle.getBundle().empty() || isa<PHINode>(S.getMainOp()) || + isVectorLikeInstWithConstOps(S.getMainOp()) || + Last->doesNotNeedToSchedule()) && "Bundle and VL out of sync"); if (!Bundle.getBundle().empty()) { #if !defined(NDEBUG) || defined(EXPENSIVE_CHECKS) auto *BundleMember = Bundle.getBundle().begin(); SmallPtrSet<Value *, 4> Processed; for (Value *V : VL) { - if (S.isNonSchedulable(V) || !Processed.insert(V).second) + if (doesNotNeedToBeScheduled(V) || !Processed.insert(V).second) continue; ++BundleMember; } @@ -4407,8 +4280,7 @@ private: /// in general. ScalarsVectorizationLegality getScalarsVectorizationLegality(ArrayRef<Value *> VL, unsigned Depth, - const EdgeInfo &UserTreeIdx, - bool TryCopyableElementsVectorization) const; + const EdgeInfo &UserTreeIdx) const; /// Checks if the specified list of the instructions/values can be vectorized /// and fills required data before actual scheduling of the instructions. @@ -5124,8 +4996,7 @@ private: /// Build a bundle from the ScheduleData nodes corresponding to the /// scalar instruction for each lane. - ScheduleBundle &buildBundle(ArrayRef<Value *> VL, - const InstructionsState &S); + ScheduleBundle &buildBundle(ArrayRef<Value *> VL); /// Checks if a bundle of instructions can be scheduled, i.e. has no /// cyclic dependencies. This is only a dry-run, no instructions are @@ -8011,7 +7882,7 @@ void BoUpSLP::buildExternalUses( // For each lane: for (int Lane = 0, LE = Entry->Scalars.size(); Lane != LE; ++Lane) { Value *Scalar = Entry->Scalars[Lane]; - if (!isa<Instruction>(Scalar) || Entry->isCopyableElement(Scalar)) + if (!isa<Instruction>(Scalar)) continue; // All uses must be replaced already? No need to do it again. auto It = ScalarToExtUses.find(Scalar); @@ -9741,8 +9612,7 @@ static bool tryToFindDuplicates(SmallVectorImpl<Value *> &VL, PoisonValue::get(UniqueValues.front()->getType())); // Check that extended with poisons operations are still valid for // vectorization (div/rem are not allowed). 
- if (!S.areInstructionsWithCopyableElements() && - !getSameOpcode(PaddedUniqueValues, TLI).valid()) { + if (!getSameOpcode(PaddedUniqueValues, TLI).valid()) { LLVM_DEBUG(dbgs() << "SLP: Scalar used twice in bundle.\n"); ReuseShuffleIndices.clear(); return false; @@ -9891,95 +9761,13 @@ bool BoUpSLP::canBuildSplitNode(ArrayRef<Value *> VL, } namespace { -/// Class accepts incoming list of values, checks if it is able to model -/// "copyable" values as compatible operations, and generates the list of values -/// for scheduling and list of operands doe the new nodes. +/// Class accepts incoming list of values and generates the list of values +/// for scheduling and list of operands for the new nodes. class InstructionsCompatibilityAnalysis { DominatorTree &DT; const DataLayout &DL; const TargetTransformInfo &TTI; const TargetLibraryInfo &TLI; - unsigned MainOpcode = 0; - Instruction *MainOp = nullptr; - - /// Identifies the best candidate value, which represents main opcode - /// operation. - /// Currently the best candidate is the Add instruction with the parent - /// block with the highest DFS incoming number (block, that dominates other). - void findAndSetMainInstruction(ArrayRef<Value *> VL) { - BasicBlock *Parent = nullptr; - // Checks if the instruction has supported opcode. - auto IsSupportedOpcode = [](Instruction *I) { - return I && I->getOpcode() == Instruction::Add; - }; - SmallDenseSet<Value *, 8> Operands; - for (Value *V : VL) { - auto *I = dyn_cast<Instruction>(V); - if (!I) - continue; - if (!DT.isReachableFromEntry(I->getParent())) - continue; - if (!MainOp) { - MainOp = I; - Parent = I->getParent(); - Operands.insert(I->op_begin(), I->op_end()); - continue; - } - if (Parent == I->getParent()) { - if (!IsSupportedOpcode(MainOp)) - MainOp = I; - if (MainOp->getOpcode() == I->getOpcode() && - doesNotNeedToBeScheduled(MainOp) && !doesNotNeedToBeScheduled(I)) - MainOp = I; - Operands.insert(I->op_begin(), I->op_end()); - continue; - } - auto *NodeA = DT.getNode(Parent); - auto *NodeB = DT.getNode(I->getParent()); - assert(NodeA && "Should only process reachable instructions"); - assert(NodeB && "Should only process reachable instructions"); - assert((NodeA == NodeB) == - (NodeA->getDFSNumIn() == NodeB->getDFSNumIn()) && - "Different nodes should have different DFS numbers"); - if (NodeA->getDFSNumIn() < NodeB->getDFSNumIn()) { - MainOp = I; - Parent = I->getParent(); - Operands.clear(); - Operands.insert(I->op_begin(), I->op_end()); - } - } - if (!IsSupportedOpcode(MainOp) || Operands.contains(MainOp)) { - MainOp = nullptr; - return; - } - MainOpcode = MainOp->getOpcode(); - } - - /// Returns the idempotent value for the \p MainOp with the detected \p - /// MainOpcode. For Add, returns 0. For Or, it should choose between false and - /// the operand itself, since V or V == V. - Value *selectBestIdempotentValue() const { - assert(MainOpcode == Instruction::Add && "Unsupported opcode"); - return ConstantExpr::getBinOpIdentity(MainOpcode, MainOp->getType(), - !MainOp->isCommutative()); - } - - /// Returns the value and operands for the \p V, considering if it is original - /// instruction and its actual operands should be returned, or it is a - /// copyable element and its should be represented as idempotent instruction. 
- SmallVector<Value *> getOperands(const InstructionsState &S, Value *V) const { - if (isa<PoisonValue>(V)) - return {V, V}; - if (!S.isCopyableElement(V)) - return convertTo(cast<Instruction>(V), S).second; - switch (MainOpcode) { - case Instruction::Add: - return {V, selectBestIdempotentValue()}; - default: - break; - } - llvm_unreachable("Unsupported opcode"); - } /// Builds operands for the original instructions. void @@ -10140,151 +9928,22 @@ public: const TargetLibraryInfo &TLI) : DT(DT), DL(DL), TTI(TTI), TLI(TLI) {} - InstructionsState - buildInstructionsState(ArrayRef<Value *> VL, const BoUpSLP &R, - bool TryCopyableElementsVectorization, - bool WithProfitabilityCheck = false) { - InstructionsState S = getSameOpcode(VL, TLI); - if (S) - return S; - if (!VectorizeCopyableElements || !TryCopyableElementsVectorization) - return S; - findAndSetMainInstruction(VL); - if (!MainOp) - return InstructionsState::invalid(); - S = InstructionsState(MainOp, MainOp, /*HasCopyables=*/true); - // TODO: Remove this check once support for schulable copyables is landed. - if (any_of(VL, [&](Value *V) { - return S.isCopyableElement(V) && !S.isNonSchedulable(V); - })) - return InstructionsState::invalid(); - - if (!WithProfitabilityCheck) - return S; - // Check if it is profitable to vectorize the instruction. - SmallVector<BoUpSLP::ValueList> Operands = buildOperands(S, VL); - if (VL.size() == 2) { - // Check if the operands allow better vectorization. - SmallVector<std::pair<Value *, Value *>, 4> Candidates; - Candidates.emplace_back(Operands[0][0], Operands[0][1]); - Candidates.emplace_back(Operands[1][0], Operands[1][1]); - if (isCommutative(MainOp)) { - Candidates.emplace_back(Operands[0][0], Operands[1][1]); - Candidates.emplace_back(Operands[1][0], Operands[0][1]); - } - // No good candidates - not profitable. - if (!R.findBestRootPair(Candidates, - BoUpSLP::LookAheadHeuristics::ScoreSplat)) { - // Deeper analysis for 2 splats/constants. - SmallVector<std::pair<Value *, Value *>, 4> Candidates1, Candidates2; - Candidates1.emplace_back(Operands[0][0], Operands[0][1]); - Candidates2.emplace_back(Operands[1][0], Operands[1][1]); - bool Res = - R.findBestRootPair(Candidates1) && R.findBestRootPair(Candidates2); - if (!Res && isCommutative(MainOp)) { - Candidates1.clear(); - Candidates2.clear(); - Candidates1.emplace_back(Operands[0][0], Operands[1][1]); - Candidates2.emplace_back(Operands[1][0], Operands[0][1]); - Res = R.findBestRootPair(Candidates1) && - R.findBestRootPair(Candidates2); - } - if (!Res) - return InstructionsState::invalid(); - } - return S; - } - assert(Operands.size() == 2 && "Unexpected number of operands!"); - unsigned CopyableNum = - count_if(VL, [&](Value *V) { return S.isCopyableElement(V); }); - if (CopyableNum < VL.size() / 2) - return S; - // Check profitability if number of copyables > VL.size() / 2. - // 1. Reorder operands for better matching. - if (isCommutative(MainOp)) { - for (auto &Ops : Operands) { - // Make instructions the first operands. - if (!isa<Instruction>(Ops.front()) && isa<Instruction>(Ops.back())) { - std::swap(Ops.front(), Ops.back()); - continue; - } - // Make constants the second operands. - if (isa<Constant>(Ops.front())) { - std::swap(Ops.front(), Ops.back()); - continue; - } - } - } - // 2. Check, if operands can be vectorized. 
- if (count_if(Operands.back(), IsaPred<Instruction>) > 1) - return InstructionsState::invalid(); - auto CheckOperand = [&](ArrayRef<Value *> Ops) { - if (allConstant(Ops) || isSplat(Ops)) - return true; - // Check if it is "almost" splat, i.e. has >= 4 elements and only single - // one is different. - constexpr unsigned Limit = 4; - if (Operands.front().size() >= Limit) { - SmallDenseMap<const Value *, unsigned> Counters; - for (Value *V : Ops) { - if (isa<UndefValue>(V)) - continue; - ++Counters[V]; - } - if (Counters.size() == 2 && - any_of(Counters, [&](const std::pair<const Value *, unsigned> &C) { - return C.second == 1; - })) - return true; - } - // First operand not a constant or splat? Last attempt - check for - // potential vectorization. - InstructionsCompatibilityAnalysis Analysis(DT, DL, TTI, TLI); - InstructionsState OpS = Analysis.buildInstructionsState( - Ops, R, /*TryCopyableElementsVectorization=*/true); - if (!OpS) - return false; - unsigned CopyableNum = - count_if(Ops, [&](Value *V) { return OpS.isCopyableElement(V); }); - return CopyableNum <= VL.size() / 2; - }; - if (!CheckOperand(Operands.front())) - return InstructionsState::invalid(); - - return S; - } - SmallVector<BoUpSLP::ValueList> buildOperands(const InstructionsState &S, ArrayRef<Value *> VL) { assert(S && "Invalid state!"); SmallVector<BoUpSLP::ValueList> Operands; - if (S.areInstructionsWithCopyableElements()) { - MainOp = S.getMainOp(); - MainOpcode = S.getOpcode(); - Operands.assign(MainOp->getNumOperands(), - BoUpSLP::ValueList(VL.size(), nullptr)); - for (auto [Idx, V] : enumerate(VL)) { - SmallVector<Value *> OperandsForValue = getOperands(S, V); - for (auto [OperandIdx, Operand] : enumerate(OperandsForValue)) - Operands[OperandIdx][Idx] = Operand; - } - } else { - buildOriginalOperands(S, VL, Operands); - } + buildOriginalOperands(S, VL, Operands); return Operands; } }; } // namespace -BoUpSLP::ScalarsVectorizationLegality BoUpSLP::getScalarsVectorizationLegality( - ArrayRef<Value *> VL, unsigned Depth, const EdgeInfo &UserTreeIdx, - bool TryCopyableElementsVectorization) const { +BoUpSLP::ScalarsVectorizationLegality +BoUpSLP::getScalarsVectorizationLegality(ArrayRef<Value *> VL, unsigned Depth, + const EdgeInfo &UserTreeIdx) const { assert((allConstant(VL) || allSameType(VL)) && "Invalid types!"); - InstructionsCompatibilityAnalysis Analysis(*DT, *DL, *TTI, *TLI); - InstructionsState S = Analysis.buildInstructionsState( - VL, *this, TryCopyableElementsVectorization, - /*WithProfitabilityCheck=*/true); + InstructionsState S = getSameOpcode(VL, *TLI); // Don't go into catchswitch blocks, which can happen with PHIs. // Such blocks can only have PHIs and the catchswitch. 
There is no @@ -10583,9 +10242,9 @@ void BoUpSLP::buildTreeRec(ArrayRef<Value *> VLRef, unsigned Depth, return true; }; - ScalarsVectorizationLegality Legality = getScalarsVectorizationLegality( - VL, Depth, UserTreeIdx, /*TryCopyableElementsVectorization=*/false); - InstructionsState S = Legality.getInstructionsState(); + ScalarsVectorizationLegality Legality = + getScalarsVectorizationLegality(VL, Depth, UserTreeIdx); + const InstructionsState &S = Legality.getInstructionsState(); if (!Legality.isLegal()) { if (Legality.trySplitVectorize()) { auto [MainOp, AltOp] = getMainAltOpsNoStateVL(VL); @@ -10593,18 +10252,11 @@ void BoUpSLP::buildTreeRec(ArrayRef<Value *> VLRef, unsigned Depth, if (MainOp && AltOp && TrySplitNode(InstructionsState(MainOp, AltOp))) return; } - if (!S) - Legality = getScalarsVectorizationLegality( - VL, Depth, UserTreeIdx, /*TryCopyableElementsVectorization=*/true); - if (!Legality.isLegal()) { - if (Legality.tryToFindDuplicates()) - tryToFindDuplicates(VL, ReuseShuffleIndices, *TTI, *TLI, S, - UserTreeIdx); + if (Legality.tryToFindDuplicates()) + tryToFindDuplicates(VL, ReuseShuffleIndices, *TTI, *TLI, S, UserTreeIdx); - newGatherTreeEntry(VL, S, UserTreeIdx, ReuseShuffleIndices); - return; - } - S = Legality.getInstructionsState(); + newGatherTreeEntry(VL, S, UserTreeIdx, ReuseShuffleIndices); + return; } // FIXME: investigate if there are profitable cases for VL.size() <= 4. @@ -13372,8 +13024,7 @@ BoUpSLP::getEntryCost(const TreeEntry *E, ArrayRef<Value *> VectorizedVals, assert(E->getOpcode() && ((allSameType(VL) && allSameBlock(VL)) || (E->getOpcode() == Instruction::GetElementPtr && - E->getMainOp()->getType()->isPointerTy()) || - E->hasCopyableElements()) && + E->getMainOp()->getType()->isPointerTy())) && "Invalid VL"); Instruction *VL0 = E->getMainOp(); unsigned ShuffleOrOp = @@ -13385,7 +13036,6 @@ BoUpSLP::getEntryCost(const TreeEntry *E, ArrayRef<Value *> VectorizedVals, SmallBitVector UsedScalars(Sz, false); for (unsigned I = 0; I < Sz; ++I) { if (isa<Instruction>(UniqueValues[I]) && - !E->isCopyableElement(UniqueValues[I]) && getTreeEntries(UniqueValues[I]).front() == E) continue; UsedScalars.set(I); @@ -16425,8 +16075,6 @@ Instruction &BoUpSLP::getLastInstructionInBundle(const TreeEntry *E) { auto *I = dyn_cast<Instruction>(V); if (!I) continue; - if (E->isCopyableElement(I)) - continue; if (FirstInst->getParent() == I->getParent()) { if (I->comesBefore(FirstInst)) FirstInst = I; @@ -16491,8 +16139,7 @@ Instruction &BoUpSLP::getLastInstructionInBundle(const TreeEntry *E) { return nullptr; for (Value *V : E->Scalars) { auto *I = dyn_cast<Instruction>(V); - if (!I || isa<PHINode>(I) || - (!E->isCopyableElement(I) && doesNotNeedToBeScheduled(I))) + if (!I || isa<PHINode>(I) || doesNotNeedToBeScheduled(I)) continue; ArrayRef<ScheduleBundle *> Bundles = It->second->getScheduleBundles(I); if (Bundles.empty()) @@ -16511,8 +16158,8 @@ Instruction &BoUpSLP::getLastInstructionInBundle(const TreeEntry *E) { [](Value *V) { return !isa<GetElementPtrInst>(V) && isa<Instruction>(V); })) || - all_of(E->Scalars, [&](Value *V) { - return isa<PoisonValue>(V) || E->isCopyableElement(V) || + all_of(E->Scalars, [](Value *V) { + return isa<PoisonValue>(V) || (!isVectorLikeInstWithConstOps(V) && isUsedOutsideBlock(V)); })) Res = FindLastInst(); @@ -18993,7 +18640,6 @@ Value *BoUpSLP::vectorizeTree( TE->UserTreeIndex.UserTE->State == TreeEntry::Vectorize && (TE->UserTreeIndex.UserTE->getOpcode() != Instruction::PHI || TE->UserTreeIndex.UserTE->isAltShuffle()) && - 
!TE->UserTreeIndex.UserTE->hasCopyableElements() && all_of(TE->UserTreeIndex.UserTE->Scalars, [](Value *V) { return isUsedOutsideBlock(V); })) { Instruction &LastInst = @@ -19536,7 +19182,7 @@ Value *BoUpSLP::vectorizeTree( if (auto *EE = dyn_cast<ExtractElementInst>(Scalar); EE && IgnoredExtracts.contains(EE)) continue; - if (!isa<Instruction>(Scalar) || Entry->isCopyableElement(Scalar)) + if (isa<PoisonValue>(Scalar)) continue; #ifndef NDEBUG Type *Ty = Scalar->getType(); @@ -19778,15 +19424,12 @@ void BoUpSLP::optimizeGatherSequence() { } BoUpSLP::ScheduleBundle & -BoUpSLP::BlockScheduling::buildBundle(ArrayRef<Value *> VL, - const InstructionsState &S) { +BoUpSLP::BlockScheduling::buildBundle(ArrayRef<Value *> VL) { auto &BundlePtr = ScheduledBundlesList.emplace_back(std::make_unique<ScheduleBundle>()); for (Value *V : VL) { if (doesNotNeedToBeScheduled(V)) continue; - if (S.isCopyableElement(V)) - continue; ScheduleData *BundleMember = getScheduleData(V); assert(BundleMember && "no ScheduleData for bundle member " "(maybe not in same basic block)"); @@ -19807,19 +19450,10 @@ BoUpSLP::BlockScheduling::tryScheduleBundle(ArrayRef<Value *> VL, BoUpSLP *SLP, const InstructionsState &S) { // No need to schedule PHIs, insertelement, extractelement and extractvalue // instructions. - bool HasCopyables = S.areInstructionsWithCopyableElements(); if (isa<PHINode>(S.getMainOp()) || - isVectorLikeInstWithConstOps(S.getMainOp()) || - (!HasCopyables && doesNotNeedToSchedule(VL)) || - all_of(VL, [&](Value *V) { return S.isNonSchedulable(V); })) + isVectorLikeInstWithConstOps(S.getMainOp()) || doesNotNeedToSchedule(VL)) return nullptr; - // TODO Remove once full support for copyables is landed. - assert(all_of(VL, - [&](Value *V) { - return !S.isCopyableElement(V) || S.isNonSchedulable(V); - }) && - "Copyable elements should not be schedulable"); // Initialize the instruction bundle. Instruction *OldScheduleEnd = ScheduleEnd; LLVM_DEBUG(dbgs() << "SLP: bundle: " << *S.getMainOp() << "\n"); @@ -19865,7 +19499,7 @@ BoUpSLP::BlockScheduling::tryScheduleBundle(ArrayRef<Value *> VL, BoUpSLP *SLP, // Make sure that the scheduling region contains all // instructions of the bundle. 
for (Value *V : VL) { - if (doesNotNeedToBeScheduled(V) || S.isCopyableElement(V)) + if (doesNotNeedToBeScheduled(V)) continue; if (!extendSchedulingRegion(V, S)) { // If the scheduling region got new instructions at the lower end (or it @@ -19882,7 +19516,7 @@ BoUpSLP::BlockScheduling::tryScheduleBundle(ArrayRef<Value *> VL, BoUpSLP *SLP, bool ReSchedule = false; for (Value *V : VL) { - if (doesNotNeedToBeScheduled(V) || S.isCopyableElement(V)) + if (doesNotNeedToBeScheduled(V)) continue; ScheduleData *BundleMember = getScheduleData(V); assert(BundleMember && @@ -19907,7 +19541,7 @@ BoUpSLP::BlockScheduling::tryScheduleBundle(ArrayRef<Value *> VL, BoUpSLP *SLP, ReSchedule = true; } - ScheduleBundle &Bundle = buildBundle(VL, S); + ScheduleBundle &Bundle = buildBundle(VL); TryScheduleBundleImpl(ReSchedule, Bundle); if (!Bundle.isReady()) { for (ScheduleData *BD : Bundle.getBundle()) { @@ -19924,7 +19558,7 @@ BoUpSLP::BlockScheduling::tryScheduleBundle(ArrayRef<Value *> VL, BoUpSLP *SLP, } ScheduledBundlesList.pop_back(); for (Value *V : VL) { - if (doesNotNeedToBeScheduled(V) || S.isCopyableElement(V)) + if (doesNotNeedToBeScheduled(V)) continue; ScheduledBundles.find(cast<Instruction>(V))->getSecond().pop_back(); } @@ -20553,7 +20187,7 @@ bool BoUpSLP::collectValuesToDemote( }; if (E.isGather() || !Visited.insert(&E).second || any_of(E.Scalars, [&](Value *V) { - return !isa<Constant>(V) && all_of(V->users(), [&](User *U) { + return !isa<PoisonValue>(V) && all_of(V->users(), [&](User *U) { return isa<InsertElementInst>(U) && !isVectorized(U); }); })) @@ -21019,12 +20653,7 @@ void BoUpSLP::computeMinimumValueSizes() { if (!IsKnownPositive) ++BitWidth1; - auto *I = dyn_cast<Instruction>(Root); - if (!I) { - MaxBitWidth = std::max(BitWidth1, MaxBitWidth); - continue; - } - APInt Mask = DB->getDemandedBits(I); + APInt Mask = DB->getDemandedBits(cast<Instruction>(Root)); unsigned BitWidth2 = Mask.getBitWidth() - Mask.countl_zero(); MaxBitWidth = std::max<unsigned>(std::min(BitWidth1, BitWidth2), MaxBitWidth); @@ -21353,9 +20982,7 @@ SLPVectorizerPass::vectorizeStoreChain(ArrayRef<Value *> Chain, BoUpSLP &R, for (Value *V : Chain) ValOps.insert(cast<StoreInst>(V)->getValueOperand()); // Operands are not same/alt opcodes or non-power-of-2 uniques - exit. 
- InstructionsCompatibilityAnalysis Analysis(*DT, *DL, *TTI, *TLI); - InstructionsState S = Analysis.buildInstructionsState( - ValOps.getArrayRef(), R, /*TryCopyableElementsVectorization=*/true); + InstructionsState S = getSameOpcode(ValOps.getArrayRef(), *TLI); if (all_of(ValOps, IsaPred<Instruction>) && ValOps.size() > 1) { DenseSet<Value *> Stores(Chain.begin(), Chain.end()); bool IsAllowedSize = diff --git a/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp b/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp index 57b713d..1fbc3f3 100644 --- a/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp +++ b/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp @@ -3445,7 +3445,6 @@ void VPInterleaveRecipe::execute(VPTransformState &State) { VPValue *BlockInMask = getMask(); VPValue *Addr = getAddr(); Value *ResAddr = State.get(Addr, VPLane(0)); - Value *PoisonVec = PoisonValue::get(VecTy); auto CreateGroupMask = [&BlockInMask, &State, &InterleaveFactor](Value *MaskForGaps) -> Value * { @@ -3484,6 +3483,7 @@ void VPInterleaveRecipe::execute(VPTransformState &State) { Instruction *NewLoad; if (BlockInMask || MaskForGaps) { Value *GroupMask = CreateGroupMask(MaskForGaps); + Value *PoisonVec = PoisonValue::get(VecTy); NewLoad = State.Builder.CreateMaskedLoad(VecTy, ResAddr, Group->getAlign(), GroupMask, PoisonVec, "wide.masked.vec"); @@ -3493,57 +3493,39 @@ void VPInterleaveRecipe::execute(VPTransformState &State) { Group->addMetadata(NewLoad); ArrayRef<VPValue *> VPDefs = definedValues(); - const DataLayout &DL = State.CFG.PrevBB->getDataLayout(); if (VecTy->isScalableTy()) { // Scalable vectors cannot use arbitrary shufflevectors (only splats), // so must use intrinsics to deinterleave. assert(InterleaveFactor <= 8 && "Unsupported deinterleave factor for scalable vectors"); - Value *Deinterleave = State.Builder.CreateIntrinsic( + NewLoad = State.Builder.CreateIntrinsic( getDeinterleaveIntrinsicID(InterleaveFactor), NewLoad->getType(), NewLoad, /*FMFSource=*/nullptr, "strided.vec"); + } - for (unsigned I = 0, J = 0; I < InterleaveFactor; ++I) { - Instruction *Member = Group->getMember(I); - Value *StridedVec = State.Builder.CreateExtractValue(Deinterleave, I); - if (!Member) { - // This value is not needed as it's not used - cast<Instruction>(StridedVec)->eraseFromParent(); - continue; - } - // If this member has different type, cast the result type. - if (Member->getType() != ScalarTy) { - VectorType *OtherVTy = VectorType::get(Member->getType(), State.VF); - StridedVec = - createBitOrPointerCast(State.Builder, StridedVec, OtherVTy, DL); - } - - if (Group->isReverse()) - StridedVec = State.Builder.CreateVectorReverse(StridedVec, "reverse"); - - State.set(VPDefs[J], StridedVec); - ++J; - } + auto CreateStridedVector = [&InterleaveFactor, &State, + &NewLoad](unsigned Index) -> Value * { + assert(Index < InterleaveFactor && "Illegal group index"); + if (State.VF.isScalable()) + return State.Builder.CreateExtractValue(NewLoad, Index); - return; - } - assert(!State.VF.isScalable() && "VF is assumed to be non scalable."); + // For fixed length VF, use shuffle to extract the sub-vectors from the + // wide load. + auto StrideMask = + createStrideMask(Index, InterleaveFactor, State.VF.getFixedValue()); + return State.Builder.CreateShuffleVector(NewLoad, StrideMask, + "strided.vec"); + }; - // For each member in the group, shuffle out the appropriate data from the - // wide loads. 
- unsigned J = 0; - for (unsigned I = 0; I < InterleaveFactor; ++I) { + for (unsigned I = 0, J = 0; I < InterleaveFactor; ++I) { Instruction *Member = Group->getMember(I); // Skip the gaps in the group. if (!Member) continue; - auto StrideMask = - createStrideMask(I, InterleaveFactor, State.VF.getFixedValue()); - Value *StridedVec = - State.Builder.CreateShuffleVector(NewLoad, StrideMask, "strided.vec"); + Value *StridedVec = CreateStridedVector(I); // If this member has different type, cast the result type. if (Member->getType() != ScalarTy) { diff --git a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp index 2a92083..cb370fe 100644 --- a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp +++ b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp @@ -1481,9 +1481,9 @@ static bool simplifyBranchConditionForVFAndUF(VPlan &Plan, ElementCount BestVF, // (BranchOnCond true). auto *Header = cast<VPBasicBlock>(VectorRegion->getEntry()); auto *CanIVTy = Plan.getCanonicalIV()->getScalarType(); - if (all_of( - Header->phis(), - IsaPred<VPCanonicalIVPHIRecipe, VPFirstOrderRecurrencePHIRecipe>)) { + if (all_of(Header->phis(), + IsaPred<VPCanonicalIVPHIRecipe, VPEVLBasedIVPHIRecipe, + VPFirstOrderRecurrencePHIRecipe>)) { for (VPRecipeBase &HeaderR : make_early_inc_range(Header->phis())) { auto *HeaderPhiR = cast<VPHeaderPHIRecipe>(&HeaderR); HeaderPhiR->replaceAllUsesWith(HeaderPhiR->getStartValue()); diff --git a/llvm/test/Assembler/difile-empty-source.ll b/llvm/test/Assembler/difile-empty-source.ll new file mode 100644 index 0000000..11587d8 --- /dev/null +++ b/llvm/test/Assembler/difile-empty-source.ll @@ -0,0 +1,12 @@ +; RUN: llvm-as < %s | llvm-dis | llvm-as | llvm-dis | FileCheck %s +; RUN: verify-uselistorder + +; CHECK: !DIFile({{.*}}, source: "") + +!llvm.dbg.cu = !{!0} +!llvm.module.flags = !{!2, !3} + +!0 = distinct !DICompileUnit(language: DW_LANG_C11, file: !1, emissionKind: FullDebug) +!1 = !DIFile(filename: "-", directory: "/", checksumkind: CSK_MD5, checksum: "d41d8cd98f00b204e9800998ecf8427e", source: "") +!2 = !{i32 7, !"Dwarf Version", i32 5} +!3 = !{i32 2, !"Debug Info Version", i32 3} diff --git a/llvm/test/CodeGen/AArch64/aarch64-mops.ll b/llvm/test/CodeGen/AArch64/aarch64-mops.ll index ff7872c..83530049a 100644 --- a/llvm/test/CodeGen/AArch64/aarch64-mops.ll +++ b/llvm/test/CodeGen/AArch64/aarch64-mops.ll @@ -87,46 +87,17 @@ entry: } define void @memset_10_zeroval_volatile(ptr %dst) { -; GISel-WITHOUT-MOPS-O0-LABEL: memset_10_zeroval_volatile: -; GISel-WITHOUT-MOPS-O0: // %bb.0: // %entry -; GISel-WITHOUT-MOPS-O0-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill -; GISel-WITHOUT-MOPS-O0-NEXT: .cfi_def_cfa_offset 16 -; GISel-WITHOUT-MOPS-O0-NEXT: .cfi_offset w30, -16 -; GISel-WITHOUT-MOPS-O0-NEXT: mov w8, #10 // =0xa -; GISel-WITHOUT-MOPS-O0-NEXT: mov w2, w8 -; GISel-WITHOUT-MOPS-O0-NEXT: mov w1, wzr -; GISel-WITHOUT-MOPS-O0-NEXT: bl memset -; GISel-WITHOUT-MOPS-O0-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload -; GISel-WITHOUT-MOPS-O0-NEXT: ret -; -; GISel-WITHOUT-MOPS-O3-LABEL: memset_10_zeroval_volatile: -; GISel-WITHOUT-MOPS-O3: // %bb.0: // %entry -; GISel-WITHOUT-MOPS-O3-NEXT: str x30, [sp, #-16]! 
// 8-byte Folded Spill -; GISel-WITHOUT-MOPS-O3-NEXT: .cfi_def_cfa_offset 16 -; GISel-WITHOUT-MOPS-O3-NEXT: .cfi_offset w30, -16 -; GISel-WITHOUT-MOPS-O3-NEXT: mov w1, wzr -; GISel-WITHOUT-MOPS-O3-NEXT: mov w2, #10 // =0xa -; GISel-WITHOUT-MOPS-O3-NEXT: bl memset -; GISel-WITHOUT-MOPS-O3-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload -; GISel-WITHOUT-MOPS-O3-NEXT: ret -; -; GISel-MOPS-O0-LABEL: memset_10_zeroval_volatile: -; GISel-MOPS-O0: // %bb.0: // %entry -; GISel-MOPS-O0-NEXT: mov w8, #10 // =0xa -; GISel-MOPS-O0-NEXT: // kill: def $x8 killed $w8 -; GISel-MOPS-O0-NEXT: mov x9, xzr -; GISel-MOPS-O0-NEXT: setp [x0]!, x8!, x9 -; GISel-MOPS-O0-NEXT: setm [x0]!, x8!, x9 -; GISel-MOPS-O0-NEXT: sete [x0]!, x8!, x9 -; GISel-MOPS-O0-NEXT: ret +; GISel-WITHOUT-MOPS-LABEL: memset_10_zeroval_volatile: +; GISel-WITHOUT-MOPS: // %bb.0: // %entry +; GISel-WITHOUT-MOPS-NEXT: str xzr, [x0] +; GISel-WITHOUT-MOPS-NEXT: strh wzr, [x0, #8] +; GISel-WITHOUT-MOPS-NEXT: ret ; -; GISel-MOPS-O3-LABEL: memset_10_zeroval_volatile: -; GISel-MOPS-O3: // %bb.0: // %entry -; GISel-MOPS-O3-NEXT: mov w8, #10 // =0xa -; GISel-MOPS-O3-NEXT: setp [x0]!, x8!, xzr -; GISel-MOPS-O3-NEXT: setm [x0]!, x8!, xzr -; GISel-MOPS-O3-NEXT: sete [x0]!, x8!, xzr -; GISel-MOPS-O3-NEXT: ret +; GISel-MOPS-LABEL: memset_10_zeroval_volatile: +; GISel-MOPS: // %bb.0: // %entry +; GISel-MOPS-NEXT: str xzr, [x0] +; GISel-MOPS-NEXT: strh wzr, [x0, #8] +; GISel-MOPS-NEXT: ret ; ; SDAG-WITHOUT-MOPS-O2-LABEL: memset_10_zeroval_volatile: ; SDAG-WITHOUT-MOPS-O2: // %bb.0: // %entry @@ -490,43 +461,46 @@ entry: define void @memset_10_volatile(ptr %dst, i32 %value) { ; GISel-WITHOUT-MOPS-O0-LABEL: memset_10_volatile: ; GISel-WITHOUT-MOPS-O0: // %bb.0: // %entry -; GISel-WITHOUT-MOPS-O0-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill -; GISel-WITHOUT-MOPS-O0-NEXT: .cfi_def_cfa_offset 16 -; GISel-WITHOUT-MOPS-O0-NEXT: .cfi_offset w30, -16 -; GISel-WITHOUT-MOPS-O0-NEXT: mov w8, #10 // =0xa -; GISel-WITHOUT-MOPS-O0-NEXT: mov w2, w8 -; GISel-WITHOUT-MOPS-O0-NEXT: bl memset -; GISel-WITHOUT-MOPS-O0-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload +; GISel-WITHOUT-MOPS-O0-NEXT: // implicit-def: $x8 +; GISel-WITHOUT-MOPS-O0-NEXT: mov w8, w1 +; GISel-WITHOUT-MOPS-O0-NEXT: and x8, x8, #0xff +; GISel-WITHOUT-MOPS-O0-NEXT: mov x9, #72340172838076673 // =0x101010101010101 +; GISel-WITHOUT-MOPS-O0-NEXT: mul x8, x8, x9 +; GISel-WITHOUT-MOPS-O0-NEXT: str x8, [x0] +; GISel-WITHOUT-MOPS-O0-NEXT: // kill: def $w8 killed $w8 killed $x8 +; GISel-WITHOUT-MOPS-O0-NEXT: strh w8, [x0, #8] ; GISel-WITHOUT-MOPS-O0-NEXT: ret ; ; GISel-WITHOUT-MOPS-O3-LABEL: memset_10_volatile: ; GISel-WITHOUT-MOPS-O3: // %bb.0: // %entry -; GISel-WITHOUT-MOPS-O3-NEXT: str x30, [sp, #-16]! 
// 8-byte Folded Spill -; GISel-WITHOUT-MOPS-O3-NEXT: .cfi_def_cfa_offset 16 -; GISel-WITHOUT-MOPS-O3-NEXT: .cfi_offset w30, -16 -; GISel-WITHOUT-MOPS-O3-NEXT: mov w2, #10 // =0xa -; GISel-WITHOUT-MOPS-O3-NEXT: bl memset -; GISel-WITHOUT-MOPS-O3-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload +; GISel-WITHOUT-MOPS-O3-NEXT: // kill: def $w1 killed $w1 def $x1 +; GISel-WITHOUT-MOPS-O3-NEXT: mov x8, #72340172838076673 // =0x101010101010101 +; GISel-WITHOUT-MOPS-O3-NEXT: and x9, x1, #0xff +; GISel-WITHOUT-MOPS-O3-NEXT: mul x8, x9, x8 +; GISel-WITHOUT-MOPS-O3-NEXT: str x8, [x0] +; GISel-WITHOUT-MOPS-O3-NEXT: strh w8, [x0, #8] ; GISel-WITHOUT-MOPS-O3-NEXT: ret ; ; GISel-MOPS-O0-LABEL: memset_10_volatile: ; GISel-MOPS-O0: // %bb.0: // %entry -; GISel-MOPS-O0-NEXT: mov w8, #10 // =0xa -; GISel-MOPS-O0-NEXT: // kill: def $x8 killed $w8 -; GISel-MOPS-O0-NEXT: // implicit-def: $x9 -; GISel-MOPS-O0-NEXT: mov w9, w1 -; GISel-MOPS-O0-NEXT: setp [x0]!, x8!, x9 -; GISel-MOPS-O0-NEXT: setm [x0]!, x8!, x9 -; GISel-MOPS-O0-NEXT: sete [x0]!, x8!, x9 +; GISel-MOPS-O0-NEXT: // implicit-def: $x8 +; GISel-MOPS-O0-NEXT: mov w8, w1 +; GISel-MOPS-O0-NEXT: and x8, x8, #0xff +; GISel-MOPS-O0-NEXT: mov x9, #72340172838076673 // =0x101010101010101 +; GISel-MOPS-O0-NEXT: mul x8, x8, x9 +; GISel-MOPS-O0-NEXT: str x8, [x0] +; GISel-MOPS-O0-NEXT: // kill: def $w8 killed $w8 killed $x8 +; GISel-MOPS-O0-NEXT: strh w8, [x0, #8] ; GISel-MOPS-O0-NEXT: ret ; ; GISel-MOPS-O3-LABEL: memset_10_volatile: ; GISel-MOPS-O3: // %bb.0: // %entry -; GISel-MOPS-O3-NEXT: mov w8, #10 // =0xa ; GISel-MOPS-O3-NEXT: // kill: def $w1 killed $w1 def $x1 -; GISel-MOPS-O3-NEXT: setp [x0]!, x8!, x1 -; GISel-MOPS-O3-NEXT: setm [x0]!, x8!, x1 -; GISel-MOPS-O3-NEXT: sete [x0]!, x8!, x1 +; GISel-MOPS-O3-NEXT: mov x8, #72340172838076673 // =0x101010101010101 +; GISel-MOPS-O3-NEXT: and x9, x1, #0xff +; GISel-MOPS-O3-NEXT: mul x8, x9, x8 +; GISel-MOPS-O3-NEXT: str x8, [x0] +; GISel-MOPS-O3-NEXT: strh w8, [x0, #8] ; GISel-MOPS-O3-NEXT: ret ; ; SDAG-WITHOUT-MOPS-O2-LABEL: memset_10_volatile: @@ -905,43 +879,21 @@ entry: } define void @memcpy_10_volatile(ptr %dst, ptr %src, i32 %value) { -; GISel-WITHOUT-MOPS-O0-LABEL: memcpy_10_volatile: -; GISel-WITHOUT-MOPS-O0: // %bb.0: // %entry -; GISel-WITHOUT-MOPS-O0-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill -; GISel-WITHOUT-MOPS-O0-NEXT: .cfi_def_cfa_offset 16 -; GISel-WITHOUT-MOPS-O0-NEXT: .cfi_offset w30, -16 -; GISel-WITHOUT-MOPS-O0-NEXT: mov w8, #10 // =0xa -; GISel-WITHOUT-MOPS-O0-NEXT: mov w2, w8 -; GISel-WITHOUT-MOPS-O0-NEXT: bl memcpy -; GISel-WITHOUT-MOPS-O0-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload -; GISel-WITHOUT-MOPS-O0-NEXT: ret -; -; GISel-WITHOUT-MOPS-O3-LABEL: memcpy_10_volatile: -; GISel-WITHOUT-MOPS-O3: // %bb.0: // %entry -; GISel-WITHOUT-MOPS-O3-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill -; GISel-WITHOUT-MOPS-O3-NEXT: .cfi_def_cfa_offset 16 -; GISel-WITHOUT-MOPS-O3-NEXT: .cfi_offset w30, -16 -; GISel-WITHOUT-MOPS-O3-NEXT: mov w2, #10 // =0xa -; GISel-WITHOUT-MOPS-O3-NEXT: bl memcpy -; GISel-WITHOUT-MOPS-O3-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload -; GISel-WITHOUT-MOPS-O3-NEXT: ret -; -; GISel-MOPS-O0-LABEL: memcpy_10_volatile: -; GISel-MOPS-O0: // %bb.0: // %entry -; GISel-MOPS-O0-NEXT: mov w8, #10 // =0xa -; GISel-MOPS-O0-NEXT: // kill: def $x8 killed $w8 -; GISel-MOPS-O0-NEXT: cpyfp [x0]!, [x1]!, x8! -; GISel-MOPS-O0-NEXT: cpyfm [x0]!, [x1]!, x8! -; GISel-MOPS-O0-NEXT: cpyfe [x0]!, [x1]!, x8! 
-; GISel-MOPS-O0-NEXT: ret +; GISel-WITHOUT-MOPS-LABEL: memcpy_10_volatile: +; GISel-WITHOUT-MOPS: // %bb.0: // %entry +; GISel-WITHOUT-MOPS-NEXT: ldr x8, [x1] +; GISel-WITHOUT-MOPS-NEXT: str x8, [x0] +; GISel-WITHOUT-MOPS-NEXT: ldrh w8, [x1, #8] +; GISel-WITHOUT-MOPS-NEXT: strh w8, [x0, #8] +; GISel-WITHOUT-MOPS-NEXT: ret ; -; GISel-MOPS-O3-LABEL: memcpy_10_volatile: -; GISel-MOPS-O3: // %bb.0: // %entry -; GISel-MOPS-O3-NEXT: mov w8, #10 // =0xa -; GISel-MOPS-O3-NEXT: cpyfp [x0]!, [x1]!, x8! -; GISel-MOPS-O3-NEXT: cpyfm [x0]!, [x1]!, x8! -; GISel-MOPS-O3-NEXT: cpyfe [x0]!, [x1]!, x8! -; GISel-MOPS-O3-NEXT: ret +; GISel-MOPS-LABEL: memcpy_10_volatile: +; GISel-MOPS: // %bb.0: // %entry +; GISel-MOPS-NEXT: ldr x8, [x1] +; GISel-MOPS-NEXT: str x8, [x0] +; GISel-MOPS-NEXT: ldrh w8, [x1, #8] +; GISel-MOPS-NEXT: strh w8, [x0, #8] +; GISel-MOPS-NEXT: ret ; ; SDAG-WITHOUT-MOPS-O2-LABEL: memcpy_10_volatile: ; SDAG-WITHOUT-MOPS-O2: // %bb.0: // %entry @@ -1736,40 +1688,34 @@ entry: define void @memmove_10_volatile(ptr %dst, ptr %src, i32 %value) { ; GISel-WITHOUT-MOPS-O0-LABEL: memmove_10_volatile: ; GISel-WITHOUT-MOPS-O0: // %bb.0: // %entry -; GISel-WITHOUT-MOPS-O0-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill -; GISel-WITHOUT-MOPS-O0-NEXT: .cfi_def_cfa_offset 16 -; GISel-WITHOUT-MOPS-O0-NEXT: .cfi_offset w30, -16 -; GISel-WITHOUT-MOPS-O0-NEXT: mov w8, #10 // =0xa -; GISel-WITHOUT-MOPS-O0-NEXT: mov w2, w8 -; GISel-WITHOUT-MOPS-O0-NEXT: bl memmove -; GISel-WITHOUT-MOPS-O0-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload +; GISel-WITHOUT-MOPS-O0-NEXT: ldr x9, [x1] +; GISel-WITHOUT-MOPS-O0-NEXT: ldrh w8, [x1, #8] +; GISel-WITHOUT-MOPS-O0-NEXT: str x9, [x0] +; GISel-WITHOUT-MOPS-O0-NEXT: strh w8, [x0, #8] ; GISel-WITHOUT-MOPS-O0-NEXT: ret ; ; GISel-WITHOUT-MOPS-O3-LABEL: memmove_10_volatile: ; GISel-WITHOUT-MOPS-O3: // %bb.0: // %entry -; GISel-WITHOUT-MOPS-O3-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill -; GISel-WITHOUT-MOPS-O3-NEXT: .cfi_def_cfa_offset 16 -; GISel-WITHOUT-MOPS-O3-NEXT: .cfi_offset w30, -16 -; GISel-WITHOUT-MOPS-O3-NEXT: mov w2, #10 // =0xa -; GISel-WITHOUT-MOPS-O3-NEXT: bl memmove -; GISel-WITHOUT-MOPS-O3-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload +; GISel-WITHOUT-MOPS-O3-NEXT: ldr x8, [x1] +; GISel-WITHOUT-MOPS-O3-NEXT: ldrh w9, [x1, #8] +; GISel-WITHOUT-MOPS-O3-NEXT: str x8, [x0] +; GISel-WITHOUT-MOPS-O3-NEXT: strh w9, [x0, #8] ; GISel-WITHOUT-MOPS-O3-NEXT: ret ; ; GISel-MOPS-O0-LABEL: memmove_10_volatile: ; GISel-MOPS-O0: // %bb.0: // %entry -; GISel-MOPS-O0-NEXT: mov w8, #10 // =0xa -; GISel-MOPS-O0-NEXT: // kill: def $x8 killed $w8 -; GISel-MOPS-O0-NEXT: cpyp [x0]!, [x1]!, x8! -; GISel-MOPS-O0-NEXT: cpym [x0]!, [x1]!, x8! -; GISel-MOPS-O0-NEXT: cpye [x0]!, [x1]!, x8! +; GISel-MOPS-O0-NEXT: ldr x9, [x1] +; GISel-MOPS-O0-NEXT: ldrh w8, [x1, #8] +; GISel-MOPS-O0-NEXT: str x9, [x0] +; GISel-MOPS-O0-NEXT: strh w8, [x0, #8] ; GISel-MOPS-O0-NEXT: ret ; ; GISel-MOPS-O3-LABEL: memmove_10_volatile: ; GISel-MOPS-O3: // %bb.0: // %entry -; GISel-MOPS-O3-NEXT: mov w8, #10 // =0xa -; GISel-MOPS-O3-NEXT: cpyp [x0]!, [x1]!, x8! -; GISel-MOPS-O3-NEXT: cpym [x0]!, [x1]!, x8! -; GISel-MOPS-O3-NEXT: cpye [x0]!, [x1]!, x8! 
+; GISel-MOPS-O3-NEXT: ldr x8, [x1] +; GISel-MOPS-O3-NEXT: ldrh w9, [x1, #8] +; GISel-MOPS-O3-NEXT: str x8, [x0] +; GISel-MOPS-O3-NEXT: strh w9, [x0, #8] ; GISel-MOPS-O3-NEXT: ret ; ; SDAG-WITHOUT-MOPS-O2-LABEL: memmove_10_volatile: diff --git a/llvm/test/CodeGen/AArch64/aarch64-split-and-bitmask-immediate.ll b/llvm/test/CodeGen/AArch64/aarch64-split-and-bitmask-immediate.ll index e31c9a0..113eb14 100644 --- a/llvm/test/CodeGen/AArch64/aarch64-split-and-bitmask-immediate.ll +++ b/llvm/test/CodeGen/AArch64/aarch64-split-and-bitmask-immediate.ll @@ -263,3 +263,110 @@ entry: %conv = zext i1 %cmp to i8 ret i8 %conv } + +; Test ANDS. +define i32 @test1_ands(i32 %a) { +; CHECK-LABEL: test1_ands: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: and w8, w0, #0x3ffc00 +; CHECK-NEXT: ands w8, w8, #0xffe007ff +; CHECK-NEXT: csel w0, w0, w8, eq +; CHECK-NEXT: ret +entry: + %ands = and i32 %a, 2098176 + %c = icmp eq i32 %ands, 0 + %r = select i1 %c, i32 %a, i32 %ands + ret i32 %r +} + +; This constant should not be split because it can be handled by one mov. +define i32 @test2_ands(i32 %a) { +; CHECK-LABEL: test2_ands: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: mov w8, #135 // =0x87 +; CHECK-NEXT: ands w8, w0, w8 +; CHECK-NEXT: csel w0, w0, w8, eq +; CHECK-NEXT: ret +entry: + %ands = and i32 %a, 135 + %c = icmp eq i32 %ands, 0 + %r = select i1 %c, i32 %a, i32 %ands + ret i32 %r +} + +; This constant should not be split because the split immediate is not valid +; bitmask immediate. +define i32 @test3_ands(i32 %a) { +; CHECK-LABEL: test3_ands: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: mov w8, #1024 // =0x400 +; CHECK-NEXT: movk w8, #33, lsl #16 +; CHECK-NEXT: ands w8, w0, w8 +; CHECK-NEXT: csel w0, w0, w8, eq +; CHECK-NEXT: ret +entry: + %ands = and i32 %a, 2163712 + %c = icmp eq i32 %ands, 0 + %r = select i1 %c, i32 %a, i32 %ands + ret i32 %r +} + +define i64 @test4_ands(i64 %a) { +; CHECK-LABEL: test4_ands: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: and x8, x0, #0x3ffc00 +; CHECK-NEXT: ands x8, x8, #0xffffffffffe007ff +; CHECK-NEXT: csel x0, x0, x8, eq +; CHECK-NEXT: ret +entry: + %ands = and i64 %a, 2098176 + %c = icmp eq i64 %ands, 0 + %r = select i1 %c, i64 %a, i64 %ands + ret i64 %r +} + +define i64 @test5_ands(i64 %a) { +; CHECK-LABEL: test5_ands: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: and x8, x0, #0x3ffffc000 +; CHECK-NEXT: ands x8, x8, #0xfffffffe00007fff +; CHECK-NEXT: csel x0, x0, x8, eq +; CHECK-NEXT: ret +entry: + %ands = and i64 %a, 8589950976 + %c = icmp eq i64 %ands, 0 + %r = select i1 %c, i64 %a, i64 %ands + ret i64 %r +} + +; This constant should not be split because it can be handled by one mov. +define i64 @test6_ands(i64 %a) { +; CHECK-LABEL: test6_ands: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: mov w8, #135 // =0x87 +; CHECK-NEXT: ands x8, x0, x8 +; CHECK-NEXT: csel x0, x0, x8, eq +; CHECK-NEXT: ret +entry: + %ands = and i64 %a, 135 + %c = icmp eq i64 %ands, 0 + %r = select i1 %c, i64 %a, i64 %ands + ret i64 %r +} + +; This constant should not be split because the split immediate is not valid +; bitmask immediate. 
+define i64 @test7_ands(i64 %a) { +; CHECK-LABEL: test7_ands: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: mov w8, #1024 // =0x400 +; CHECK-NEXT: movk w8, #33, lsl #16 +; CHECK-NEXT: ands x8, x0, x8 +; CHECK-NEXT: csel x0, x0, x8, eq +; CHECK-NEXT: ret +entry: + %ands = and i64 %a, 2163712 + %c = icmp eq i64 %ands, 0 + %r = select i1 %c, i64 %a, i64 %ands + ret i64 %r +} diff --git a/llvm/test/CodeGen/AArch64/bsp_implicit_ops.mir b/llvm/test/CodeGen/AArch64/bsp_implicit_ops.mir new file mode 100644 index 0000000..23ac67c --- /dev/null +++ b/llvm/test/CodeGen/AArch64/bsp_implicit_ops.mir @@ -0,0 +1,98 @@ +# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5 +# RUN: llc -mtriple=aarch64-none-linux-gnu -run-pass aarch64-expand-pseudo -verify-machineinstrs %s -o - | FileCheck %s + + +--- +name: BSL_COPY +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $q20, $q21, $q22, $q23, $q6, $q1, $q7 + + + ; CHECK-LABEL: name: BSL_COPY + ; CHECK: liveins: $q20, $q21, $q22, $q23, $q6, $q1, $q7 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: renamable $q2 = ORRv16i8 killed renamable $q20, killed renamable $q20 + ; CHECK-NEXT: renamable $q2 = BSLv16i8 killed renamable $q2, renamable $q21, renamable $q6, implicit killed $q21_q22_q23, implicit killed $q0_q1_q2_q3, implicit-def $q0_q1_q2_q3 + ; CHECK-NEXT: $q22 = ORRv16i8 $q0, killed $q0 + ; CHECK-NEXT: $q23 = ORRv16i8 $q1, killed $q1 + ; CHECK-NEXT: $q24 = ORRv16i8 $q2, killed $q2 + ; CHECK-NEXT: $q25 = ORRv16i8 $q3, killed $q3 + ; CHECK-NEXT: RET undef $lr, implicit $q22 + renamable $q2 = BSPv16i8 killed renamable $q20, renamable $q21, renamable $q6, implicit killed $q21_q22_q23, implicit killed $q0_q1_q2_q3, implicit-def $q0_q1_q2_q3 + $q22 = ORRv16i8 $q0, killed $q0 + $q23 = ORRv16i8 $q1, killed $q1 + $q24 = ORRv16i8 $q2, killed $q2 + $q25 = ORRv16i8 $q3, killed $q3 + RET_ReallyLR implicit $q22 +... +--- +name: BSL +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $q20, $q21, $q22, $q23, $q6, $q1, $q7 + + ; CHECK-LABEL: name: BSL + ; CHECK: liveins: $q20, $q21, $q22, $q23, $q6, $q1, $q7 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: renamable $q2 = BSLv16i8 killed renamable $q2, renamable $q21, renamable $q6, implicit killed $q21_q22_q23, implicit killed $q0_q1_q2_q3, implicit-def $q0_q1_q2_q3 + ; CHECK-NEXT: $q22 = ORRv16i8 $q0, killed $q0 + ; CHECK-NEXT: $q23 = ORRv16i8 $q1, killed $q1 + ; CHECK-NEXT: $q24 = ORRv16i8 $q2, killed $q2 + ; CHECK-NEXT: $q25 = ORRv16i8 $q3, killed $q3 + ; CHECK-NEXT: RET undef $lr, implicit $q22 + renamable $q2 = BSPv16i8 killed renamable $q2, renamable $q21, renamable $q6, implicit killed $q21_q22_q23, implicit killed $q0_q1_q2_q3, implicit-def $q0_q1_q2_q3 + $q22 = ORRv16i8 $q0, killed $q0 + $q23 = ORRv16i8 $q1, killed $q1 + $q24 = ORRv16i8 $q2, killed $q2 + $q25 = ORRv16i8 $q3, killed $q3 + RET_ReallyLR implicit $q22 +... 
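The four BSP cases in this new MIR file (BSL_COPY and BSL above, BIF and BIT below) differ only in which source register aliases the destination. The following is a minimal, self-contained C++ sketch of that dispatch, inferred from the CHECK lines of this test; the names are hypothetical and this is not the actual AArch64ExpandPseudo code.

#include <cstdio>

// Hypothetical sketch (not LLVM's expansion code): pick the expanded
// instruction for a BSPv16i8 pseudo from which source register already
// occupies the destination, as the four test cases here exercise.
enum class Expanded { BSL, BIT, BIF, CopyThenBSL };

Expanded expandBSP(unsigned Dst, unsigned Mask, unsigned In1, unsigned In2) {
  if (Dst == Mask) return Expanded::BSL;  // dst holds the mask (BSL case)
  if (Dst == In1)  return Expanded::BIF;  // dst holds in1 (BIF case)
  if (Dst == In2)  return Expanded::BIT;  // dst holds in2 (BIT case)
  return Expanded::CopyThenBSL;           // ORR mask into dst, then BSL (BSL_COPY case)
}

int main() {
  // Mirrors the BIT test below: dst q2, mask q20, in1 q21, in2 q2.
  std::printf("%d\n", static_cast<int>(expandBSP(2, 20, 21, 2))); // prints 1 (BIT)
}
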
+--- +name: BIF +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $q20, $q21, $q22, $q23, $q6, $q1, $q7 + + ; CHECK-LABEL: name: BIF + ; CHECK: liveins: $q20, $q21, $q22, $q23, $q6, $q1, $q7 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: renamable $q2 = BIFv16i8 renamable $q2, renamable $q6, killed renamable $q20, implicit killed $q21_q22_q23, implicit killed $q0_q1_q2_q3, implicit-def $q0_q1_q2_q3 + ; CHECK-NEXT: $q22 = ORRv16i8 $q0, killed $q0 + ; CHECK-NEXT: $q23 = ORRv16i8 $q1, killed $q1 + ; CHECK-NEXT: $q24 = ORRv16i8 $q2, killed $q2 + ; CHECK-NEXT: $q25 = ORRv16i8 $q3, killed $q3 + ; CHECK-NEXT: RET undef $lr, implicit $q22 + renamable $q2 = BSPv16i8 killed renamable $q20, renamable $q2, renamable $q6, implicit killed $q21_q22_q23, implicit killed $q0_q1_q2_q3, implicit-def $q0_q1_q2_q3 + $q22 = ORRv16i8 $q0, killed $q0 + $q23 = ORRv16i8 $q1, killed $q1 + $q24 = ORRv16i8 $q2, killed $q2 + $q25 = ORRv16i8 $q3, killed $q3 + RET_ReallyLR implicit $q22 +... +--- +name: BIT +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $q20, $q21, $q22, $q23, $q6, $q1, $q7 + + ; CHECK-LABEL: name: BIT + ; CHECK: liveins: $q20, $q21, $q22, $q23, $q6, $q1, $q7 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: renamable $q2 = BITv16i8 renamable $q2, renamable $q21, killed renamable $q20, implicit killed $q21_q22_q23, implicit killed $q0_q1_q2_q3, implicit-def $q0_q1_q2_q3 + ; CHECK-NEXT: $q22 = ORRv16i8 $q0, killed $q0 + ; CHECK-NEXT: $q23 = ORRv16i8 $q1, killed $q1 + ; CHECK-NEXT: $q24 = ORRv16i8 $q2, killed $q2 + ; CHECK-NEXT: $q25 = ORRv16i8 $q3, killed $q3 + ; CHECK-NEXT: RET undef $lr, implicit $q22 + renamable $q2 = BSPv16i8 killed renamable $q20, renamable $q21, renamable $q2, implicit killed $q21_q22_q23, implicit killed $q0_q1_q2_q3, implicit-def $q0_q1_q2_q3 + $q22 = ORRv16i8 $q0, killed $q0 + $q23 = ORRv16i8 $q1, killed $q1 + $q24 = ORRv16i8 $q2, killed $q2 + $q25 = ORRv16i8 $q3, killed $q3 + RET_ReallyLR implicit $q22 +... diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-memcpy.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-memcpy.mir index be3fe91..4f5f52b 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-memcpy.mir +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-memcpy.mir @@ -31,3 +31,33 @@ body: | S_ENDPGM 0 ... +--- +name: memcpy_test_volatile +body: | + bb.0: + liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3 + + ; CHECK-LABEL: name: memcpy_test_volatile + ; CHECK: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1 + ; CHECK-NEXT: [[MV:%[0-9]+]]:_(p0) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32) + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3 + ; CHECK-NEXT: [[MV1:%[0-9]+]]:_(p0) = G_MERGE_VALUES [[COPY2]](s32), [[COPY3]](s32) + ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[MV1]](p0) :: (volatile load (s8)) + ; CHECK-NEXT: G_STORE [[LOAD]](s32), [[MV]](p0) :: (volatile store (s8)) + ; CHECK-NEXT: S_ENDPGM 0 + %0:_(s32) = COPY $vgpr0 + %1:_(s32) = COPY $vgpr1 + %2:_(p0) = G_MERGE_VALUES %0:_(s32), %1:_(s32) + %3:_(s32) = COPY $vgpr2 + %4:_(s32) = COPY $vgpr3 + %5:_(p0) = G_MERGE_VALUES %3:_(s32), %4:_(s32) + %6:_(s32) = G_CONSTANT i32 1 + %7:_(s64) = G_ZEXT %6:_(s32) + G_MEMCPY %2:_(p0), %5:_(p0), %7:_(s64), 0 :: (volatile store (s8)), (volatile load (s8)) + S_ENDPGM 0 + +... 
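The memset/memcpy/memmove tests above all lower a known 10-byte volatile operation inline as an 8-byte access plus a 2-byte access (str/strh pairs) instead of a libcall or MOPS sequence. Below is a toy, self-contained C++ sketch of that power-of-two chunking, illustrative only and not LLVM's actual lowering code.

#include <cstdint>
#include <cstdio>
#include <vector>

// Split a small fixed copy size into power-of-two chunks, largest first,
// e.g. 10 -> {8, 2}, matching the str x / strh w pairs in the tests above.
std::vector<unsigned> chunkSizes(uint64_t Size) {
  std::vector<unsigned> Chunks;
  for (unsigned Width = 8; Width >= 1; Width /= 2)
    while (Size >= Width) {
      Chunks.push_back(Width);
      Size -= Width;
    }
  return Chunks;
}

int main() {
  for (unsigned W : chunkSizes(10)) // prints "8 2"
    std::printf("%u ", W);
}
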
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-memcpyinline.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-memcpyinline.mir index a82ca30..0392aef 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-memcpyinline.mir +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-memcpyinline.mir @@ -31,3 +31,33 @@ body: | S_ENDPGM 0 ... +--- +name: memcpyinline_test_volatile +body: | + bb.0: + liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3 + + ; CHECK-LABEL: name: memcpyinline_test_volatile + ; CHECK: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1 + ; CHECK-NEXT: [[MV:%[0-9]+]]:_(p0) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32) + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3 + ; CHECK-NEXT: [[MV1:%[0-9]+]]:_(p0) = G_MERGE_VALUES [[COPY2]](s32), [[COPY3]](s32) + ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[MV1]](p0) :: (volatile load (s8)) + ; CHECK-NEXT: G_STORE [[LOAD]](s32), [[MV]](p0) :: (volatile store (s8)) + ; CHECK-NEXT: S_ENDPGM 0 + %0:_(s32) = COPY $vgpr0 + %1:_(s32) = COPY $vgpr1 + %2:_(p0) = G_MERGE_VALUES %0:_(s32), %1:_(s32) + %3:_(s32) = COPY $vgpr2 + %4:_(s32) = COPY $vgpr3 + %5:_(p0) = G_MERGE_VALUES %3:_(s32), %4:_(s32) + %6:_(s32) = G_CONSTANT i32 1 + %7:_(s64) = G_ZEXT %6:_(s32) + G_MEMCPY_INLINE %2:_(p0), %5:_(p0), %7:_(s64) :: (volatile store (s8)), (volatile load (s8)) + S_ENDPGM 0 + +... diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-memmove.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-memmove.mir index e7cfaab..1f8d1aa 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-memmove.mir +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-memmove.mir @@ -31,3 +31,33 @@ body: | S_ENDPGM 0 ... +--- +name: memmove_test_volatile +body: | + bb.0: + liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3 + + ; CHECK-LABEL: name: memmove_test_volatile + ; CHECK: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1 + ; CHECK-NEXT: [[MV:%[0-9]+]]:_(p0) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32) + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3 + ; CHECK-NEXT: [[MV1:%[0-9]+]]:_(p0) = G_MERGE_VALUES [[COPY2]](s32), [[COPY3]](s32) + ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[MV1]](p0) :: (volatile load (s8)) + ; CHECK-NEXT: G_STORE [[LOAD]](s32), [[MV]](p0) :: (volatile store (s8)) + ; CHECK-NEXT: S_ENDPGM 0 + %0:_(s32) = COPY $vgpr0 + %1:_(s32) = COPY $vgpr1 + %2:_(p0) = G_MERGE_VALUES %0:_(s32), %1:_(s32) + %3:_(s32) = COPY $vgpr2 + %4:_(s32) = COPY $vgpr3 + %5:_(p0) = G_MERGE_VALUES %3:_(s32), %4:_(s32) + %6:_(s32) = G_CONSTANT i32 1 + %7:_(s64) = G_ZEXT %6:_(s32) + G_MEMMOVE %2:_(p0), %5:_(p0), %7:_(s64), 0 :: (volatile store (s8)), (volatile load (s8)) + S_ENDPGM 0 + +... diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-memset.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-memset.mir index 021cebb..dda94e15 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-memset.mir +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-memset.mir @@ -30,3 +30,32 @@ body: | S_ENDPGM 0 ... 
+--- +name: memset_test_volatile +body: | + bb.0: + liveins: $vgpr0, $vgpr1, $vgpr2 + + ; CHECK-LABEL: name: memset_test_volatile + ; CHECK: liveins: $vgpr0, $vgpr1, $vgpr2 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1 + ; CHECK-NEXT: [[MV:%[0-9]+]]:_(p0) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32) + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2 + ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY2]](s32) + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(s8) = COPY [[TRUNC]](s8) + ; CHECK-NEXT: G_STORE [[COPY2]](s32), [[MV]](p0) :: (volatile store (s8)) + ; CHECK-NEXT: S_ENDPGM 0 + %0:_(s32) = COPY $vgpr0 + %1:_(s32) = COPY $vgpr1 + %2:_(p0) = G_MERGE_VALUES %0:_(s32), %1:_(s32) + %3:_(s32) = COPY $vgpr2 + %4:_(s16) = G_TRUNC %3:_(s32) + %5:_(s8) = G_TRUNC %4:_(s16) + %6:_(s32) = G_CONSTANT i32 1 + %7:_(s64) = G_ZEXT %6:_(s32) + G_MEMSET %2:_(p0), %5:_(s8), %7:_(s64), 0 :: (volatile store (s8)) + S_ENDPGM 0 + +... diff --git a/llvm/test/CodeGen/AMDGPU/div_i128.ll b/llvm/test/CodeGen/AMDGPU/div_i128.ll index 4cb0d2d..e6c38d2 100644 --- a/llvm/test/CodeGen/AMDGPU/div_i128.ll +++ b/llvm/test/CodeGen/AMDGPU/div_i128.ll @@ -475,28 +475,21 @@ define i128 @v_sdiv_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-O0-NEXT: ; implicit-def: $sgpr8 ; GFX9-O0-NEXT: ; kill: def $vgpr4 killed $vgpr4 def $vgpr4_vgpr5 killed $exec ; GFX9-O0-NEXT: v_mov_b32_e32 v5, v8 +; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill +; GFX9-O0-NEXT: s_nop 0 +; GFX9-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill ; GFX9-O0-NEXT: ; implicit-def: $sgpr8 ; GFX9-O0-NEXT: ; implicit-def: $sgpr8 ; GFX9-O0-NEXT: ; kill: def $vgpr7 killed $vgpr7 def $vgpr7_vgpr8 killed $exec ; GFX9-O0-NEXT: v_mov_b32_e32 v8, v6 -; GFX9-O0-NEXT: v_mov_b32_e32 v10, v8 -; GFX9-O0-NEXT: v_mov_b32_e32 v9, v7 -; GFX9-O0-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_nop 0 -; GFX9-O0-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill -; GFX9-O0-NEXT: v_mov_b32_e32 v10, v5 -; GFX9-O0-NEXT: v_mov_b32_e32 v9, v4 -; GFX9-O0-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill +; GFX9-O0-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill ; GFX9-O0-NEXT: s_nop 0 -; GFX9-O0-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_mov_b64 s[8:9], s[6:7] -; GFX9-O0-NEXT: v_cmp_eq_u64_e64 s[8:9], v[7:8], s[8:9] +; GFX9-O0-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill +; GFX9-O0-NEXT: v_cmp_eq_u64_e64 s[8:9], v[7:8], s[6:7] ; GFX9-O0-NEXT: s_mov_b64 s[12:13], 0x7f -; GFX9-O0-NEXT: s_mov_b64 s[14:15], s[12:13] -; GFX9-O0-NEXT: v_cmp_gt_u64_e64 s[14:15], v[4:5], s[14:15] +; GFX9-O0-NEXT: v_cmp_gt_u64_e64 s[14:15], v[4:5], s[12:13] ; GFX9-O0-NEXT: v_cndmask_b32_e64 v9, 0, 1, s[14:15] -; GFX9-O0-NEXT: s_mov_b64 s[14:15], s[6:7] -; GFX9-O0-NEXT: v_cmp_ne_u64_e64 s[14:15], v[7:8], s[14:15] +; GFX9-O0-NEXT: v_cmp_ne_u64_e64 s[14:15], v[7:8], s[6:7] ; GFX9-O0-NEXT: v_cndmask_b32_e64 v6, 0, 1, s[14:15] ; GFX9-O0-NEXT: v_cndmask_b32_e64 v6, v6, v9, s[8:9] ; GFX9-O0-NEXT: v_and_b32_e64 v6, 1, v6 @@ -507,7 +500,6 @@ define i128 @v_sdiv_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-O0-NEXT: v_mov_b32_e32 v6, v5 ; GFX9-O0-NEXT: s_mov_b32 s14, s13 ; GFX9-O0-NEXT: v_xor_b32_e64 v6, v6, s14 -; GFX9-O0-NEXT: ; kill: def $vgpr4 killed $vgpr4 killed 
$vgpr4_vgpr5 killed $exec ; GFX9-O0-NEXT: ; kill: def $sgpr12 killed $sgpr12 killed $sgpr12_sgpr13 ; GFX9-O0-NEXT: v_xor_b32_e64 v4, v4, s12 ; GFX9-O0-NEXT: ; kill: def $vgpr4 killed $vgpr4 def $vgpr4_vgpr5 killed $exec @@ -1046,10 +1038,10 @@ define i128 @v_sdiv_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-O0-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:40 ; 4-byte Folded Reload ; GFX9-O0-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:44 ; 4-byte Folded Reload ; GFX9-O0-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:48 ; 4-byte Folded Reload -; GFX9-O0-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:28 ; 4-byte Folded Reload -; GFX9-O0-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:32 ; 4-byte Folded Reload -; GFX9-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:20 ; 4-byte Folded Reload -; GFX9-O0-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:24 ; 4-byte Folded Reload +; GFX9-O0-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:20 ; 4-byte Folded Reload +; GFX9-O0-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:24 ; 4-byte Folded Reload +; GFX9-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:28 ; 4-byte Folded Reload +; GFX9-O0-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:32 ; 4-byte Folded Reload ; GFX9-O0-NEXT: s_mov_b64 s[6:7], 1 ; GFX9-O0-NEXT: s_mov_b32 s5, s6 ; GFX9-O0-NEXT: s_waitcnt vmcnt(1) @@ -2667,28 +2659,21 @@ define i128 @v_udiv_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-O0-NEXT: ; implicit-def: $sgpr8 ; GFX9-O0-NEXT: ; kill: def $vgpr4 killed $vgpr4 def $vgpr4_vgpr5 killed $exec ; GFX9-O0-NEXT: v_mov_b32_e32 v5, v8 +; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill +; GFX9-O0-NEXT: s_nop 0 +; GFX9-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill ; GFX9-O0-NEXT: ; implicit-def: $sgpr8 ; GFX9-O0-NEXT: ; implicit-def: $sgpr8 ; GFX9-O0-NEXT: ; kill: def $vgpr7 killed $vgpr7 def $vgpr7_vgpr8 killed $exec ; GFX9-O0-NEXT: v_mov_b32_e32 v8, v6 -; GFX9-O0-NEXT: v_mov_b32_e32 v10, v8 -; GFX9-O0-NEXT: v_mov_b32_e32 v9, v7 -; GFX9-O0-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_nop 0 -; GFX9-O0-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill -; GFX9-O0-NEXT: v_mov_b32_e32 v10, v5 -; GFX9-O0-NEXT: v_mov_b32_e32 v9, v4 -; GFX9-O0-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill +; GFX9-O0-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill ; GFX9-O0-NEXT: s_nop 0 -; GFX9-O0-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_mov_b64 s[8:9], s[6:7] -; GFX9-O0-NEXT: v_cmp_eq_u64_e64 s[8:9], v[7:8], s[8:9] +; GFX9-O0-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill +; GFX9-O0-NEXT: v_cmp_eq_u64_e64 s[8:9], v[7:8], s[6:7] ; GFX9-O0-NEXT: s_mov_b64 s[12:13], 0x7f -; GFX9-O0-NEXT: s_mov_b64 s[14:15], s[12:13] -; GFX9-O0-NEXT: v_cmp_gt_u64_e64 s[14:15], v[4:5], s[14:15] +; GFX9-O0-NEXT: v_cmp_gt_u64_e64 s[14:15], v[4:5], s[12:13] ; GFX9-O0-NEXT: v_cndmask_b32_e64 v9, 0, 1, s[14:15] -; GFX9-O0-NEXT: s_mov_b64 s[14:15], s[6:7] -; GFX9-O0-NEXT: v_cmp_ne_u64_e64 s[14:15], v[7:8], s[14:15] +; GFX9-O0-NEXT: v_cmp_ne_u64_e64 s[14:15], v[7:8], s[6:7] ; GFX9-O0-NEXT: v_cndmask_b32_e64 v6, 0, 1, s[14:15] ; GFX9-O0-NEXT: v_cndmask_b32_e64 v6, v6, v9, s[8:9] ; GFX9-O0-NEXT: v_and_b32_e64 v6, 1, v6 @@ -2699,7 +2684,6 @@ define i128 @v_udiv_i128_vv(i128 %lhs, i128 %rhs) { ; 
GFX9-O0-NEXT: v_mov_b32_e32 v6, v5 ; GFX9-O0-NEXT: s_mov_b32 s14, s13 ; GFX9-O0-NEXT: v_xor_b32_e64 v6, v6, s14 -; GFX9-O0-NEXT: ; kill: def $vgpr4 killed $vgpr4 killed $vgpr4_vgpr5 killed $exec ; GFX9-O0-NEXT: ; kill: def $sgpr12 killed $sgpr12 killed $sgpr12_sgpr13 ; GFX9-O0-NEXT: v_xor_b32_e64 v4, v4, s12 ; GFX9-O0-NEXT: ; kill: def $vgpr4 killed $vgpr4 def $vgpr4_vgpr5 killed $exec @@ -3238,10 +3222,10 @@ define i128 @v_udiv_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-O0-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:40 ; 4-byte Folded Reload ; GFX9-O0-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:44 ; 4-byte Folded Reload ; GFX9-O0-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:48 ; 4-byte Folded Reload -; GFX9-O0-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:28 ; 4-byte Folded Reload -; GFX9-O0-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:32 ; 4-byte Folded Reload -; GFX9-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:20 ; 4-byte Folded Reload -; GFX9-O0-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:24 ; 4-byte Folded Reload +; GFX9-O0-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:20 ; 4-byte Folded Reload +; GFX9-O0-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:24 ; 4-byte Folded Reload +; GFX9-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:28 ; 4-byte Folded Reload +; GFX9-O0-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:32 ; 4-byte Folded Reload ; GFX9-O0-NEXT: s_mov_b64 s[6:7], 1 ; GFX9-O0-NEXT: s_mov_b32 s5, s6 ; GFX9-O0-NEXT: s_waitcnt vmcnt(1) diff --git a/llvm/test/CodeGen/AMDGPU/flat-scratch-fold-fi-gfx1250.mir b/llvm/test/CodeGen/AMDGPU/flat-scratch-fold-fi-gfx1250.mir new file mode 100644 index 0000000..e5955ad --- /dev/null +++ b/llvm/test/CodeGen/AMDGPU/flat-scratch-fold-fi-gfx1250.mir @@ -0,0 +1,43 @@ +# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 2 +# RUN: llc -mtriple=amdgcn -mcpu=gfx1250 -start-before=si-fold-operands -stop-after=prologepilog -o - %s | FileCheck -check-prefix=GCN %s + +--- +name: test_fold_fi_scratch_load_vgpr +tracksRegLiveness: true +machineFunctionInfo: + scratchRSrcReg: $sgpr0_sgpr1_sgpr2_sgpr3 + stackPtrOffsetReg: $sgpr32 +stack: + - { id: 0, name: '', type: spill-slot, offset: 0, size: 4, alignment: 4 } +body: | + bb.0.entry: + ; GCN-LABEL: name: test_fold_fi_scratch_load_vgpr + ; GCN: renamable $vgpr0 = SCRATCH_LOAD_DWORD_SADDR $sgpr32, 4, 0, implicit $exec, implicit $flat_scr :: (load (s32) from %stack.0, addrspace 5) + ; GCN-NEXT: S_ENDPGM 0, implicit killed renamable $vgpr0 + %0:vgpr_32 = V_MOV_B32_e32 %stack.0, implicit $exec + %1:vgpr_32 = SCRATCH_LOAD_DWORD %0:vgpr_32, 4, 0, implicit $exec, implicit $flat_scr :: (load 4 from %stack.0, addrspace 5) + S_ENDPGM 0, implicit %1 + +... 
+ +# SS form of the SCRATCH_LOAD_DWORD does not support offset scaling + +--- +name: test_no_fold_fi_scratch_load_vgpr_scale_offset +tracksRegLiveness: true +machineFunctionInfo: + scratchRSrcReg: $sgpr0_sgpr1_sgpr2_sgpr3 + stackPtrOffsetReg: $sgpr32 +stack: + - { id: 0, name: '', type: spill-slot, offset: 0, size: 4, alignment: 4 } +body: | + bb.0.entry: + ; GCN-LABEL: name: test_no_fold_fi_scratch_load_vgpr_scale_offset + ; GCN: renamable $vgpr0 = V_MOV_B32_e32 $sgpr32, implicit $exec + ; GCN-NEXT: renamable $vgpr0 = SCRATCH_LOAD_DWORD killed renamable $vgpr0, 4, 2048, implicit $exec, implicit $flat_scr :: (load (s32) from %stack.0, addrspace 5) + ; GCN-NEXT: S_ENDPGM 0, implicit killed renamable $vgpr0 + %0:vgpr_32 = V_MOV_B32_e32 %stack.0, implicit $exec + %1:vgpr_32 = SCRATCH_LOAD_DWORD %0:vgpr_32, 4, 2048, implicit $exec, implicit $flat_scr :: (load 4 from %stack.0, addrspace 5) + S_ENDPGM 0, implicit %1 + +... diff --git a/llvm/test/CodeGen/AMDGPU/llvm.round.f64.ll b/llvm/test/CodeGen/AMDGPU/llvm.round.f64.ll index 355f77a..af914bd 100644 --- a/llvm/test/CodeGen/AMDGPU/llvm.round.f64.ll +++ b/llvm/test/CodeGen/AMDGPU/llvm.round.f64.ll @@ -76,13 +76,12 @@ define amdgpu_kernel void @v_round_f64(ptr addrspace(1) %out, ptr addrspace(1) % ; SI-NEXT: s_waitcnt lgkmcnt(0) ; SI-NEXT: s_mov_b64 s[4:5], s[2:3] ; SI-NEXT: buffer_load_dwordx2 v[2:3], v[0:1], s[4:7], 0 addr64 -; SI-NEXT: s_movk_i32 s4, 0xfc01 ; SI-NEXT: s_mov_b32 s2, -1 ; SI-NEXT: s_mov_b32 s3, 0xfffff ; SI-NEXT: v_mov_b32_e32 v8, 0x3ff00000 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_bfe_u32 v4, v3, 20, 11 -; SI-NEXT: v_add_i32_e32 v6, vcc, s4, v4 +; SI-NEXT: v_add_i32_e32 v6, vcc, 0xfffffc01, v4 ; SI-NEXT: v_lshr_b64 v[4:5], s[2:3], v6 ; SI-NEXT: v_and_b32_e32 v7, 0x80000000, v3 ; SI-NEXT: v_not_b32_e32 v5, v5 diff --git a/llvm/test/CodeGen/AMDGPU/load-store-opt-scale-offset.mir b/llvm/test/CodeGen/AMDGPU/load-store-opt-scale-offset.mir new file mode 100644 index 0000000..76e2092 --- /dev/null +++ b/llvm/test/CodeGen/AMDGPU/load-store-opt-scale-offset.mir @@ -0,0 +1,104 @@ +# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 2 +# RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1250 -run-pass=si-load-store-opt -o - %s | FileCheck -check-prefix=GCN %s + +--- +name: merge_global_load_dword_2_no_scale_offset +body: | + bb.0.entry: + + ; GCN-LABEL: name: merge_global_load_dword_2_no_scale_offset + ; GCN: [[DEF:%[0-9]+]]:sreg_64_xexec_xnull = IMPLICIT_DEF + ; GCN-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF + ; GCN-NEXT: [[GLOBAL_LOAD_DWORDX2_SADDR:%[0-9]+]]:vreg_64_align2 = GLOBAL_LOAD_DWORDX2_SADDR [[DEF]], [[DEF1]], 0, 1, implicit $exec :: (load (s64) from `ptr addrspace(1) undef` + 4, align 4, addrspace 1) + ; GCN-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY [[GLOBAL_LOAD_DWORDX2_SADDR]].sub0 + ; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY killed [[GLOBAL_LOAD_DWORDX2_SADDR]].sub1 + ; GCN-NEXT: S_NOP 0, implicit [[DEF1]], implicit [[COPY]] + %0:sreg_64_xexec_xnull = IMPLICIT_DEF + %1:vgpr_32 = IMPLICIT_DEF + %2:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR %0, %1, 0, 1, implicit $exec :: (load (s32) from `float addrspace(1)* undef` + 4, basealign 4, addrspace 1) + %3:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR %0, %1, 4, 1, implicit $exec :: (load (s32) from `i32 addrspace(1)* undef` + 8, basealign 4, addrspace 1) + S_NOP 0, implicit %1, implicit %2 +... 
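Before the no-merge variants that follow, it helps to have a model of what the scale-offset bit actually does to addressing. Judging from the scale-offset-smem.ll checks later in this diff, where the selected scale always matches the width of the load, a scaled access multiplies the register index by the size of the access itself. The sketch below is a hand-written illustration of that inferred model, not code from the optimizer, and the formula is an assumption read off the tests rather than a quoted hardware definition:

    #include <cstdint>
    #include <cstdio>

    // Illustrative model: with scaling enabled the register index is multiplied
    // by the access size, so addr = base + index * size + imm; without it,
    // addr = base + index + imm. (Inferred from the tests, not the ISA text.)
    static uint64_t effectiveAddr(uint64_t Base, uint64_t Index, uint64_t Imm,
                                  unsigned AccessSize, bool Scaled) {
      return Base + (Scaled ? Index * AccessSize : Index) + Imm;
    }

    int main() {
      // Two scaled dword loads at imm 0 and imm 4 are contiguous...
      uint64_t Lo = effectiveAddr(0x1000, 3, 0, /*dword*/ 4, true);
      uint64_t Hi = effectiveAddr(0x1000, 3, 4, /*dword*/ 4, true);
      // ...but a merged dwordx2 would scale the same index by 8 and land at a
      // different base address, which is why the scaled pairs in the tests
      // below must not be combined.
      uint64_t Merged = effectiveAddr(0x1000, 3, 0, /*dwordx2*/ 8, true);
      printf("lo=%#llx hi=%#llx merged=%#llx\n", (unsigned long long)Lo,
             (unsigned long long)Hi, (unsigned long long)Merged);
      return 0;
    }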
+ +--- +name: no_merge_global_load_dword_2_same_scale_offset +body: | + bb.0.entry: + + ; GCN-LABEL: name: no_merge_global_load_dword_2_same_scale_offset + ; GCN: [[DEF:%[0-9]+]]:sreg_64_xexec_xnull = IMPLICIT_DEF + ; GCN-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF + ; GCN-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]], [[DEF1]], 0, 2049, implicit $exec :: (load (s32) from `ptr addrspace(1) undef` + 4, addrspace 1) + ; GCN-NEXT: [[GLOBAL_LOAD_DWORD_SADDR1:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]], [[DEF1]], 4, 2049, implicit $exec :: (load (s32) from `ptr addrspace(1) undef` + 8, addrspace 1) + ; GCN-NEXT: S_NOP 0, implicit [[DEF1]], implicit [[GLOBAL_LOAD_DWORD_SADDR]] + %0:sreg_64_xexec_xnull = IMPLICIT_DEF + %1:vgpr_32 = IMPLICIT_DEF + %2:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR %0, %1, 0, 2049, implicit $exec :: (load (s32) from `float addrspace(1)* undef` + 4, basealign 4, addrspace 1) + %3:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR %0, %1, 4, 2049, implicit $exec :: (load (s32) from `i32 addrspace(1)* undef` + 8, basealign 4, addrspace 1) + S_NOP 0, implicit %1, implicit %2 +... + +--- +name: no_merge_global_load_dword_2_different_scale_offset +body: | + bb.0.entry: + + ; GCN-LABEL: name: no_merge_global_load_dword_2_different_scale_offset + ; GCN: [[DEF:%[0-9]+]]:sreg_64_xexec_xnull = IMPLICIT_DEF + ; GCN-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF + ; GCN-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]], [[DEF1]], 0, 0, implicit $exec :: (load (s32) from `ptr addrspace(1) undef` + 4, addrspace 1) + ; GCN-NEXT: [[GLOBAL_LOAD_DWORD_SADDR1:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]], [[DEF1]], 4, 2048, implicit $exec :: (load (s32) from `ptr addrspace(1) undef` + 8, addrspace 1) + ; GCN-NEXT: S_NOP 0, implicit [[DEF1]], implicit [[GLOBAL_LOAD_DWORD_SADDR]] + %0:sreg_64_xexec_xnull = IMPLICIT_DEF + %1:vgpr_32 = IMPLICIT_DEF + %2:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR %0, %1, 0, 0, implicit $exec :: (load (s32) from `float addrspace(1)* undef` + 4, basealign 4, addrspace 1) + %3:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR %0, %1, 4, 2048, implicit $exec :: (load (s32) from `i32 addrspace(1)* undef` + 8, basealign 4, addrspace 1) + S_NOP 0, implicit %1, implicit %2 +... + +# NB: We do not currently support merging SGPR offset and SGPR+Imm offset forms +# of S_LOAD, but the check stays the same: these cannot be merged with different +# scale offsets. +# +# We also do not currently merge flat scratch instructions, although there is a +# common check in the merge logic that CPol shall not be set for a merge to +# happen. + +--- +name: merge_s_load_x1_x1_imm_no_scale_offset +body: | + bb.0: + ; GCN-LABEL: name: merge_s_load_x1_x1_imm_no_scale_offset + ; GCN: [[DEF:%[0-9]+]]:sgpr_64 = IMPLICIT_DEF + ; GCN-NEXT: [[S_LOAD_DWORDX2_IMM:%[0-9]+]]:sreg_64_xexec = S_LOAD_DWORDX2_IMM [[DEF]], 0, 0 :: (dereferenceable invariant load (s64), align 4) + ; GCN-NEXT: [[COPY:%[0-9]+]]:sreg_32_xm0_xexec = COPY [[S_LOAD_DWORDX2_IMM]].sub0 + ; GCN-NEXT: [[COPY1:%[0-9]+]]:sreg_32_xm0_xexec = COPY killed [[S_LOAD_DWORDX2_IMM]].sub1 + %0:sgpr_64 = IMPLICIT_DEF + %1:sreg_32_xm0_xexec = S_LOAD_DWORD_IMM %0:sgpr_64, 0, 0 :: (dereferenceable invariant load (s32)) + %2:sreg_32_xm0_xexec = S_LOAD_DWORD_IMM %0:sgpr_64, 4, 0 :: (dereferenceable invariant load (s32)) +...
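Taken together, the global-load outcomes above suggest the legality rule: CPol 1/1 merges, 2049/2049 does not, and 0 versus 2048 does not, so a merge appears to require the scale-offset bit (2048 in these immediates) to be clear on both accesses while the remaining policy bits agree; the S_LOAD variants that follow exercise the same rule. A minimal sketch of that predicate, with the bit value and helper name assumed for illustration rather than taken from the AMDGPU sources:

    #include <cstdint>

    // Assumed from the immediates in the tests above: bit 11 (2048) requests
    // offset scaling. The real encoding lives in the AMDGPU CPol definitions.
    constexpr uint32_t ScaleOffsetBit = 2048;

    // Hypothetical predicate mirroring the observed outcomes: a scaled access
    // never merges (the merged width would change the scale), and otherwise
    // the two cache policies must match exactly.
    bool cpolAllowsMerge(uint32_t CPol0, uint32_t CPol1) {
      if ((CPol0 | CPol1) & ScaleOffsetBit)
        return false;
      return CPol0 == CPol1;
    }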
+ +--- +name: no_merge_s_load_x1_x1_imm_same_scale_offset +body: | + bb.0: + ; GCN-LABEL: name: no_merge_s_load_x1_x1_imm_same_scale_offset + ; GCN: [[DEF:%[0-9]+]]:sgpr_64 = IMPLICIT_DEF + ; GCN-NEXT: [[S_LOAD_DWORD_IMM:%[0-9]+]]:sreg_32_xm0_xexec = S_LOAD_DWORD_IMM [[DEF]], 0, 2048 :: (dereferenceable invariant load (s32)) + ; GCN-NEXT: [[S_LOAD_DWORD_IMM1:%[0-9]+]]:sreg_32_xm0_xexec = S_LOAD_DWORD_IMM [[DEF]], 4, 2048 :: (dereferenceable invariant load (s32)) + %0:sgpr_64 = IMPLICIT_DEF + %1:sreg_32_xm0_xexec = S_LOAD_DWORD_IMM %0:sgpr_64, 0, 2048 :: (dereferenceable invariant load (s32)) + %2:sreg_32_xm0_xexec = S_LOAD_DWORD_IMM %0:sgpr_64, 4, 2048 :: (dereferenceable invariant load (s32)) +... + +--- +name: no_merge_s_load_x1_x1_imm_different_scale_offset +body: | + bb.0: + ; GCN-LABEL: name: no_merge_s_load_x1_x1_imm_different_scale_offset + ; GCN: [[DEF:%[0-9]+]]:sgpr_64 = IMPLICIT_DEF + ; GCN-NEXT: [[S_LOAD_DWORD_IMM:%[0-9]+]]:sreg_32_xm0_xexec = S_LOAD_DWORD_IMM [[DEF]], 0, 0 :: (dereferenceable invariant load (s32)) + ; GCN-NEXT: [[S_LOAD_DWORD_IMM1:%[0-9]+]]:sreg_32_xm0_xexec = S_LOAD_DWORD_IMM [[DEF]], 4, 2048 :: (dereferenceable invariant load (s32)) + %0:sgpr_64 = IMPLICIT_DEF + %1:sreg_32_xm0_xexec = S_LOAD_DWORD_IMM %0:sgpr_64, 0, 0 :: (dereferenceable invariant load (s32)) + %2:sreg_32_xm0_xexec = S_LOAD_DWORD_IMM %0:sgpr_64, 4, 2048 :: (dereferenceable invariant load (s32)) +... diff --git a/llvm/test/CodeGen/AMDGPU/rem_i128.ll b/llvm/test/CodeGen/AMDGPU/rem_i128.ll index 5d0e4bf..8fe68ba 100644 --- a/llvm/test/CodeGen/AMDGPU/rem_i128.ll +++ b/llvm/test/CodeGen/AMDGPU/rem_i128.ll @@ -513,28 +513,21 @@ define i128 @v_srem_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-O0-NEXT: ; implicit-def: $sgpr8 ; GFX9-O0-NEXT: ; kill: def $vgpr4 killed $vgpr4 def $vgpr4_vgpr5 killed $exec ; GFX9-O0-NEXT: v_mov_b32_e32 v5, v8 +; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill +; GFX9-O0-NEXT: s_nop 0 +; GFX9-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill ; GFX9-O0-NEXT: ; implicit-def: $sgpr8 ; GFX9-O0-NEXT: ; implicit-def: $sgpr8 ; GFX9-O0-NEXT: ; kill: def $vgpr7 killed $vgpr7 def $vgpr7_vgpr8 killed $exec ; GFX9-O0-NEXT: v_mov_b32_e32 v8, v6 -; GFX9-O0-NEXT: v_mov_b32_e32 v10, v8 -; GFX9-O0-NEXT: v_mov_b32_e32 v9, v7 -; GFX9-O0-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_nop 0 -; GFX9-O0-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill -; GFX9-O0-NEXT: v_mov_b32_e32 v10, v5 -; GFX9-O0-NEXT: v_mov_b32_e32 v9, v4 -; GFX9-O0-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill +; GFX9-O0-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill ; GFX9-O0-NEXT: s_nop 0 -; GFX9-O0-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_mov_b64 s[8:9], s[6:7] -; GFX9-O0-NEXT: v_cmp_eq_u64_e64 s[8:9], v[7:8], s[8:9] +; GFX9-O0-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill +; GFX9-O0-NEXT: v_cmp_eq_u64_e64 s[8:9], v[7:8], s[6:7] ; GFX9-O0-NEXT: s_mov_b64 s[12:13], 0x7f -; GFX9-O0-NEXT: s_mov_b64 s[14:15], s[12:13] -; GFX9-O0-NEXT: v_cmp_gt_u64_e64 s[14:15], v[4:5], s[14:15] +; GFX9-O0-NEXT: v_cmp_gt_u64_e64 s[14:15], v[4:5], s[12:13] ; GFX9-O0-NEXT: v_cndmask_b32_e64 v9, 0, 1, s[14:15] -; GFX9-O0-NEXT: s_mov_b64 s[14:15], s[6:7] -; GFX9-O0-NEXT: v_cmp_ne_u64_e64 s[14:15], v[7:8], s[14:15] +; GFX9-O0-NEXT: v_cmp_ne_u64_e64 
s[14:15], v[7:8], s[6:7] ; GFX9-O0-NEXT: v_cndmask_b32_e64 v6, 0, 1, s[14:15] ; GFX9-O0-NEXT: v_cndmask_b32_e64 v6, v6, v9, s[8:9] ; GFX9-O0-NEXT: v_and_b32_e64 v6, 1, v6 @@ -545,7 +538,6 @@ define i128 @v_srem_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-O0-NEXT: v_mov_b32_e32 v6, v5 ; GFX9-O0-NEXT: s_mov_b32 s14, s13 ; GFX9-O0-NEXT: v_xor_b32_e64 v6, v6, s14 -; GFX9-O0-NEXT: ; kill: def $vgpr4 killed $vgpr4 killed $vgpr4_vgpr5 killed $exec ; GFX9-O0-NEXT: ; kill: def $sgpr12 killed $sgpr12 killed $sgpr12_sgpr13 ; GFX9-O0-NEXT: v_xor_b32_e64 v4, v4, s12 ; GFX9-O0-NEXT: ; kill: def $vgpr4 killed $vgpr4 def $vgpr4_vgpr5 killed $exec @@ -1084,10 +1076,10 @@ define i128 @v_srem_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-O0-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:40 ; 4-byte Folded Reload ; GFX9-O0-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:44 ; 4-byte Folded Reload ; GFX9-O0-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:48 ; 4-byte Folded Reload -; GFX9-O0-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:28 ; 4-byte Folded Reload -; GFX9-O0-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:32 ; 4-byte Folded Reload -; GFX9-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:20 ; 4-byte Folded Reload -; GFX9-O0-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:24 ; 4-byte Folded Reload +; GFX9-O0-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:20 ; 4-byte Folded Reload +; GFX9-O0-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:24 ; 4-byte Folded Reload +; GFX9-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:28 ; 4-byte Folded Reload +; GFX9-O0-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:32 ; 4-byte Folded Reload ; GFX9-O0-NEXT: s_mov_b64 s[6:7], 1 ; GFX9-O0-NEXT: s_mov_b32 s5, s6 ; GFX9-O0-NEXT: s_waitcnt vmcnt(1) @@ -1900,28 +1892,21 @@ define i128 @v_urem_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-O0-NEXT: ; implicit-def: $sgpr8 ; GFX9-O0-NEXT: ; kill: def $vgpr4 killed $vgpr4 def $vgpr4_vgpr5 killed $exec ; GFX9-O0-NEXT: v_mov_b32_e32 v5, v8 +; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill +; GFX9-O0-NEXT: s_nop 0 +; GFX9-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill ; GFX9-O0-NEXT: ; implicit-def: $sgpr8 ; GFX9-O0-NEXT: ; implicit-def: $sgpr8 ; GFX9-O0-NEXT: ; kill: def $vgpr7 killed $vgpr7 def $vgpr7_vgpr8 killed $exec ; GFX9-O0-NEXT: v_mov_b32_e32 v8, v6 -; GFX9-O0-NEXT: v_mov_b32_e32 v10, v8 -; GFX9-O0-NEXT: v_mov_b32_e32 v9, v7 -; GFX9-O0-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_nop 0 -; GFX9-O0-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill -; GFX9-O0-NEXT: v_mov_b32_e32 v10, v5 -; GFX9-O0-NEXT: v_mov_b32_e32 v9, v4 -; GFX9-O0-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill +; GFX9-O0-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill ; GFX9-O0-NEXT: s_nop 0 -; GFX9-O0-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill -; GFX9-O0-NEXT: s_mov_b64 s[8:9], s[6:7] -; GFX9-O0-NEXT: v_cmp_eq_u64_e64 s[8:9], v[7:8], s[8:9] +; GFX9-O0-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill +; GFX9-O0-NEXT: v_cmp_eq_u64_e64 s[8:9], v[7:8], s[6:7] ; GFX9-O0-NEXT: s_mov_b64 s[12:13], 0x7f -; GFX9-O0-NEXT: s_mov_b64 s[14:15], s[12:13] -; GFX9-O0-NEXT: v_cmp_gt_u64_e64 s[14:15], v[4:5], s[14:15] +; GFX9-O0-NEXT: v_cmp_gt_u64_e64 s[14:15], v[4:5], s[12:13] ; GFX9-O0-NEXT: 
v_cndmask_b32_e64 v9, 0, 1, s[14:15] -; GFX9-O0-NEXT: s_mov_b64 s[14:15], s[6:7] -; GFX9-O0-NEXT: v_cmp_ne_u64_e64 s[14:15], v[7:8], s[14:15] +; GFX9-O0-NEXT: v_cmp_ne_u64_e64 s[14:15], v[7:8], s[6:7] ; GFX9-O0-NEXT: v_cndmask_b32_e64 v6, 0, 1, s[14:15] ; GFX9-O0-NEXT: v_cndmask_b32_e64 v6, v6, v9, s[8:9] ; GFX9-O0-NEXT: v_and_b32_e64 v6, 1, v6 @@ -1932,7 +1917,6 @@ define i128 @v_urem_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-O0-NEXT: v_mov_b32_e32 v6, v5 ; GFX9-O0-NEXT: s_mov_b32 s14, s13 ; GFX9-O0-NEXT: v_xor_b32_e64 v6, v6, s14 -; GFX9-O0-NEXT: ; kill: def $vgpr4 killed $vgpr4 killed $vgpr4_vgpr5 killed $exec ; GFX9-O0-NEXT: ; kill: def $sgpr12 killed $sgpr12 killed $sgpr12_sgpr13 ; GFX9-O0-NEXT: v_xor_b32_e64 v4, v4, s12 ; GFX9-O0-NEXT: ; kill: def $vgpr4 killed $vgpr4 def $vgpr4_vgpr5 killed $exec @@ -2471,10 +2455,10 @@ define i128 @v_urem_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-O0-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:40 ; 4-byte Folded Reload ; GFX9-O0-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:44 ; 4-byte Folded Reload ; GFX9-O0-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:48 ; 4-byte Folded Reload -; GFX9-O0-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:28 ; 4-byte Folded Reload -; GFX9-O0-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:32 ; 4-byte Folded Reload -; GFX9-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:20 ; 4-byte Folded Reload -; GFX9-O0-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:24 ; 4-byte Folded Reload +; GFX9-O0-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:20 ; 4-byte Folded Reload +; GFX9-O0-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:24 ; 4-byte Folded Reload +; GFX9-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:28 ; 4-byte Folded Reload +; GFX9-O0-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:32 ; 4-byte Folded Reload ; GFX9-O0-NEXT: s_mov_b64 s[6:7], 1 ; GFX9-O0-NEXT: s_mov_b32 s5, s6 ; GFX9-O0-NEXT: s_waitcnt vmcnt(1) diff --git a/llvm/test/CodeGen/AMDGPU/scale-offset-smem.ll b/llvm/test/CodeGen/AMDGPU/scale-offset-smem.ll new file mode 100644 index 0000000..b5bb68e --- /dev/null +++ b/llvm/test/CodeGen/AMDGPU/scale-offset-smem.ll @@ -0,0 +1,372 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2 +; RUN: llc -mtriple=amdgcn -mcpu=gfx1250 < %s | FileCheck --check-prefixes=GCN,SDAG %s +; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx1250 < %s | FileCheck --check-prefixes=GCN,GISEL %s + +define amdgpu_ps float @s_load_b32_idxprom(ptr addrspace(4) align 4 inreg %p, i32 inreg %idx) { +; GCN-LABEL: s_load_b32_idxprom: +; GCN: ; %bb.0: ; %entry +; GCN-NEXT: s_load_b32 s0, s[0:1], s2 offset:0x0 scale_offset +; GCN-NEXT: s_wait_kmcnt 0x0 +; GCN-NEXT: v_mov_b32_e32 v0, s0 +; GCN-NEXT: ; return to shader part epilog +entry: + %idxprom = zext i32 %idx to i64 + %arrayidx = getelementptr inbounds float, ptr addrspace(4) %p, i64 %idxprom + %ret = load float, ptr addrspace(4) %arrayidx, align 4 + ret float %ret +} + +; 'i32 %idx' is a signed index while SMRD soffset is unsigned, thus it is not selected. 
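The comment above is the crux: a scaled soffset is zero-extended by the hardware, while a plain i32 GEP index is sign-extended, so the two computations diverge for negative indices. A standalone arithmetic illustration of that divergence (ordinary host C++ with made-up constants, not compiler internals), ahead of the test that shows the resulting codegen:

    #include <cstdint>
    #include <cstdio>

    int main() {
      const uint64_t Base = 0x10000;
      const int32_t Idx = -1; // a negative index is legal for a signed GEP

      // What a scaled, zero-extended soffset would compute (float stride 4):
      uint64_t ZextAddr = Base + (uint64_t)(uint32_t)Idx * 4;
      // What the IR semantics of 'getelementptr ..., i32 %idx' require:
      uint64_t SextAddr = Base + (uint64_t)(int64_t)Idx * 4;

      // The two disagree, so the scale_offset form can only be selected when
      // the index is explicitly zero-extended (zext i32 to i64), as in the
      // tests that do use it.
      printf("zext: %#llx sext: %#llx\n", (unsigned long long)ZextAddr,
             (unsigned long long)SextAddr);
      return 0;
    }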
+ +define amdgpu_ps float @s_load_b32_idx32(ptr addrspace(4) align 4 inreg %p, i32 inreg %idx) { +; SDAG-LABEL: s_load_b32_idx32: +; SDAG: ; %bb.0: ; %entry +; SDAG-NEXT: s_ashr_i32 s3, s2, 31 +; SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) +; SDAG-NEXT: s_lshl_b64 s[2:3], s[2:3], 2 +; SDAG-NEXT: s_add_nc_u64 s[0:1], s[0:1], s[2:3] +; SDAG-NEXT: s_load_b32 s0, s[0:1], 0x0 +; SDAG-NEXT: s_wait_kmcnt 0x0 +; SDAG-NEXT: v_mov_b32_e32 v0, s0 +; SDAG-NEXT: ; return to shader part epilog +; +; GISEL-LABEL: s_load_b32_idx32: +; GISEL: ; %bb.0: ; %entry +; GISEL-NEXT: s_ashr_i32 s3, s2, 31 +; GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) +; GISEL-NEXT: s_lshl_b64 s[2:3], s[2:3], 2 +; GISEL-NEXT: s_add_co_u32 s0, s0, s2 +; GISEL-NEXT: s_add_co_ci_u32 s1, s1, s3 +; GISEL-NEXT: s_load_b32 s0, s[0:1], 0x0 +; GISEL-NEXT: s_wait_kmcnt 0x0 +; GISEL-NEXT: v_mov_b32_e32 v0, s0 +; GISEL-NEXT: ; return to shader part epilog +entry: + %arrayidx = getelementptr inbounds float, ptr addrspace(4) %p, i32 %idx + %ret = load float, ptr addrspace(4) %arrayidx, align 4 + ret float %ret +} + +define amdgpu_ps float @s_load_b32_idxprom_wrong_stride(ptr addrspace(4) align 4 inreg %p, i32 inreg %idx) { +; SDAG-LABEL: s_load_b32_idxprom_wrong_stride: +; SDAG: ; %bb.0: ; %entry +; SDAG-NEXT: s_mov_b32 s3, 0 +; SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) +; SDAG-NEXT: s_lshl_b64 s[2:3], s[2:3], 3 +; SDAG-NEXT: s_add_nc_u64 s[0:1], s[0:1], s[2:3] +; SDAG-NEXT: s_load_b32 s0, s[0:1], 0x0 +; SDAG-NEXT: s_wait_kmcnt 0x0 +; SDAG-NEXT: v_mov_b32_e32 v0, s0 +; SDAG-NEXT: ; return to shader part epilog +; +; GISEL-LABEL: s_load_b32_idxprom_wrong_stride: +; GISEL: ; %bb.0: ; %entry +; GISEL-NEXT: s_mov_b32 s3, 0 +; GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) +; GISEL-NEXT: s_lshl_b64 s[2:3], s[2:3], 3 +; GISEL-NEXT: s_add_co_u32 s0, s0, s2 +; GISEL-NEXT: s_add_co_ci_u32 s1, s1, s3 +; GISEL-NEXT: s_load_b32 s0, s[0:1], 0x0 +; GISEL-NEXT: s_wait_kmcnt 0x0 +; GISEL-NEXT: v_mov_b32_e32 v0, s0 +; GISEL-NEXT: ; return to shader part epilog +entry: + %idxprom = zext i32 %idx to i64 + %arrayidx = getelementptr inbounds <2 x float>, ptr addrspace(4) %p, i64 %idxprom + %ret = load float, ptr addrspace(4) %arrayidx, align 4 + ret float %ret +} + +define amdgpu_ps float @s_load_b16_idxprom_ioffset(ptr addrspace(4) align 4 inreg %p, i32 inreg %idx) { +; GCN-LABEL: s_load_b16_idxprom_ioffset: +; GCN: ; %bb.0: ; %entry +; GCN-NEXT: s_load_u16 s0, s[0:1], s2 offset:0x20 scale_offset +; GCN-NEXT: s_wait_kmcnt 0x0 +; GCN-NEXT: v_mov_b32_e32 v0, s0 +; GCN-NEXT: ; return to shader part epilog +entry: + %idxprom = zext i32 %idx to i64 + %idxadd = add i64 %idxprom, 16 + %arrayidx = getelementptr inbounds i16, ptr addrspace(4) %p, i64 %idxadd + %ld = load i16, ptr addrspace(4) %arrayidx, align 2 + %ret.i32 = zext i16 %ld to i32 + %ret = bitcast i32 %ret.i32 to float + ret float %ret +} + +define amdgpu_ps <2 x float> @s_load_b64_idxprom(ptr addrspace(4) align 4 inreg %p, i32 inreg %idx) { +; GCN-LABEL: s_load_b64_idxprom: +; GCN: ; %bb.0: ; %entry +; GCN-NEXT: s_load_b64 s[0:1], s[0:1], s2 offset:0x0 scale_offset +; GCN-NEXT: s_wait_kmcnt 0x0 +; GCN-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1 +; GCN-NEXT: ; return to shader part epilog +entry: + %idxprom = zext i32 %idx to i64 + %arrayidx = getelementptr inbounds <2 x float>, ptr addrspace(4) %p, i64 %idxprom + %ret = load <2 x 
float>, ptr addrspace(4) %arrayidx, align 4 + ret <2 x float> %ret +} + +define amdgpu_ps <3 x float> @s_load_b96_idxprom(ptr addrspace(4) align 4 inreg %p, i32 inreg %idx) { +; GCN-LABEL: s_load_b96_idxprom: +; GCN: ; %bb.0: ; %entry +; GCN-NEXT: s_load_b96 s[0:2], s[0:1], s2 offset:0x0 scale_offset +; GCN-NEXT: s_wait_kmcnt 0x0 +; GCN-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1 +; GCN-NEXT: v_mov_b32_e32 v2, s2 +; GCN-NEXT: ; return to shader part epilog +entry: + %idxprom = zext i32 %idx to i64 + %arrayidx = getelementptr inbounds [3 x float], ptr addrspace(4) %p, i64 %idxprom + %ret = load <3 x float>, ptr addrspace(4) %arrayidx, align 4 + ret <3 x float> %ret +} + +define amdgpu_ps <4 x float> @s_load_b128_idxprom(ptr addrspace(4) align 4 inreg %p, i32 inreg %idx) { +; GCN-LABEL: s_load_b128_idxprom: +; GCN: ; %bb.0: ; %entry +; GCN-NEXT: s_load_b128 s[0:3], s[0:1], s2 offset:0x0 scale_offset +; GCN-NEXT: s_wait_kmcnt 0x0 +; GCN-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1 +; GCN-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3 +; GCN-NEXT: ; return to shader part epilog +entry: + %idxprom = zext i32 %idx to i64 + %arrayidx = getelementptr inbounds <4 x float>, ptr addrspace(4) %p, i64 %idxprom + %ret = load <4 x float>, ptr addrspace(4) %arrayidx, align 4 + ret <4 x float> %ret +} + +define amdgpu_ps <8 x float> @s_load_b256_idxprom(ptr addrspace(4) align 4 inreg %p, i32 inreg %idx) { +; GCN-LABEL: s_load_b256_idxprom: +; GCN: ; %bb.0: ; %entry +; GCN-NEXT: s_load_b256 s[0:7], s[0:1], s2 offset:0x0 scale_offset +; GCN-NEXT: s_wait_kmcnt 0x0 +; GCN-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1 +; GCN-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3 +; GCN-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5 +; GCN-NEXT: v_dual_mov_b32 v6, s6 :: v_dual_mov_b32 v7, s7 +; GCN-NEXT: ; return to shader part epilog +entry: + %idxprom = zext i32 %idx to i64 + %arrayidx = getelementptr inbounds <8 x float>, ptr addrspace(4) %p, i64 %idxprom + %ret = load <8 x float>, ptr addrspace(4) %arrayidx, align 4 + ret <8 x float> %ret +} + +define amdgpu_ps <16 x float> @s_load_b512_idxprom(ptr addrspace(4) align 4 inreg %p, i32 inreg %idx) { +; GCN-LABEL: s_load_b512_idxprom: +; GCN: ; %bb.0: ; %entry +; GCN-NEXT: s_load_b512 s[0:15], s[0:1], s2 offset:0x0 scale_offset +; GCN-NEXT: s_wait_kmcnt 0x0 +; GCN-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1 +; GCN-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3 +; GCN-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5 +; GCN-NEXT: v_dual_mov_b32 v6, s6 :: v_dual_mov_b32 v7, s7 +; GCN-NEXT: v_dual_mov_b32 v8, s8 :: v_dual_mov_b32 v9, s9 +; GCN-NEXT: v_dual_mov_b32 v10, s10 :: v_dual_mov_b32 v11, s11 +; GCN-NEXT: v_dual_mov_b32 v12, s12 :: v_dual_mov_b32 v13, s13 +; GCN-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s15 +; GCN-NEXT: ; return to shader part epilog +entry: + %idxprom = zext i32 %idx to i64 + %arrayidx = getelementptr inbounds <16 x float>, ptr addrspace(4) %p, i64 %idxprom + %ret = load <16 x float>, ptr addrspace(4) %arrayidx, align 4 + ret <16 x float> %ret +} + +define amdgpu_ps float @s_load_b32_idxprom_range(ptr addrspace(4) align 4 inreg %p) { +; GCN-LABEL: s_load_b32_idxprom_range: +; GCN: ; %bb.0: ; %entry +; GCN-NEXT: s_load_b32 s2, s[0:1], 0x0 +; GCN-NEXT: s_wait_kmcnt 0x0 +; GCN-NEXT: s_load_b32 s0, s[0:1], s2 offset:0x0 scale_offset +; GCN-NEXT: s_wait_kmcnt 0x0 +; GCN-NEXT: v_mov_b32_e32 v0, s0 +; GCN-NEXT: ; return to shader part epilog +entry: + %idx = load i32, ptr addrspace(4) 
%p, align 4, !range !0 + %idxprom = zext i32 %idx to i64 + %arrayidx = getelementptr inbounds float, ptr addrspace(4) %p, i64 %idxprom + %ret = load float, ptr addrspace(4) %arrayidx, align 4 + ret float %ret +} + +define amdgpu_ps float @s_load_b32_idxprom_range_ioffset(ptr addrspace(4) align 4 inreg %p) { +; GCN-LABEL: s_load_b32_idxprom_range_ioffset: +; GCN: ; %bb.0: ; %entry +; GCN-NEXT: s_load_b32 s2, s[0:1], 0x0 +; GCN-NEXT: s_wait_kmcnt 0x0 +; GCN-NEXT: s_load_b32 s0, s[0:1], s2 offset:0x40 scale_offset +; GCN-NEXT: s_wait_kmcnt 0x0 +; GCN-NEXT: v_mov_b32_e32 v0, s0 +; GCN-NEXT: ; return to shader part epilog +entry: + %idx = load i32, ptr addrspace(4) %p, align 4, !range !0 + %idxprom = zext i32 %idx to i64 + %idxadd = add i64 %idxprom, 16 + %arrayidx = getelementptr inbounds float, ptr addrspace(4) %p, i64 %idxadd + %ret = load float, ptr addrspace(4) %arrayidx, align 4 + ret float %ret +} + +; Note: this is a byte load, there is nothing to scale + +define amdgpu_ps float @s_load_b8_idxprom_range_ioffset(ptr addrspace(4) align 4 inreg %p) { +; GCN-LABEL: s_load_b8_idxprom_range_ioffset: +; GCN: ; %bb.0: ; %entry +; GCN-NEXT: s_load_b32 s2, s[0:1], 0x0 +; GCN-NEXT: s_wait_kmcnt 0x0 +; GCN-NEXT: s_load_u8 s0, s[0:1], s2 offset:0x10 +; GCN-NEXT: s_wait_kmcnt 0x0 +; GCN-NEXT: v_mov_b32_e32 v0, s0 +; GCN-NEXT: ; return to shader part epilog +entry: + %idx = load i32, ptr addrspace(4) %p, align 4, !range !0 + %idxprom = zext i32 %idx to i64 + %idxadd = add i64 %idxprom, 16 + %arrayidx = getelementptr inbounds i8, ptr addrspace(4) %p, i64 %idxadd + %ld = load i8, ptr addrspace(4) %arrayidx + %ret.i32 = zext i8 %ld to i32 + %ret = bitcast i32 %ret.i32 to float + ret float %ret +} + +define amdgpu_ps float @s_load_b16_idxprom_range(ptr addrspace(4) align 4 inreg %p) { +; GCN-LABEL: s_load_b16_idxprom_range: +; GCN: ; %bb.0: ; %entry +; GCN-NEXT: s_load_b32 s2, s[0:1], 0x0 +; GCN-NEXT: s_wait_kmcnt 0x0 +; GCN-NEXT: s_load_u16 s0, s[0:1], s2 offset:0x0 scale_offset +; GCN-NEXT: s_wait_kmcnt 0x0 +; GCN-NEXT: v_mov_b32_e32 v0, s0 +; GCN-NEXT: ; return to shader part epilog +entry: + %idx = load i32, ptr addrspace(4) %p, align 4, !range !0 + %idxprom = zext i32 %idx to i64 + %arrayidx = getelementptr inbounds i16, ptr addrspace(4) %p, i64 %idxprom + %ld = load i16, ptr addrspace(4) %arrayidx, align 2 + %ret.i32 = zext i16 %ld to i32 + %ret = bitcast i32 %ret.i32 to float + ret float %ret +} + +define amdgpu_ps float @s_load_b16_idxprom_range_ioffset(ptr addrspace(4) align 4 inreg %p) { +; GCN-LABEL: s_load_b16_idxprom_range_ioffset: +; GCN: ; %bb.0: ; %entry +; GCN-NEXT: s_load_b32 s2, s[0:1], 0x0 +; GCN-NEXT: s_wait_kmcnt 0x0 +; GCN-NEXT: s_load_u16 s0, s[0:1], s2 offset:0x20 scale_offset +; GCN-NEXT: s_wait_kmcnt 0x0 +; GCN-NEXT: v_mov_b32_e32 v0, s0 +; GCN-NEXT: ; return to shader part epilog +entry: + %idx = load i32, ptr addrspace(4) %p, align 4, !range !0 + %idxprom = zext i32 %idx to i64 + %idxadd = add i64 %idxprom, 16 + %arrayidx = getelementptr inbounds i16, ptr addrspace(4) %p, i64 %idxadd + %ld = load i16, ptr addrspace(4) %arrayidx, align 2 + %ret.i32 = zext i16 %ld to i32 + %ret = bitcast i32 %ret.i32 to float + ret float %ret +} + +define amdgpu_ps <2 x float> @s_load_b64_idxprom_range(ptr addrspace(4) align 4 inreg %p) { +; GCN-LABEL: s_load_b64_idxprom_range: +; GCN: ; %bb.0: ; %entry +; GCN-NEXT: s_load_b32 s2, s[0:1], 0x0 +; GCN-NEXT: s_wait_kmcnt 0x0 +; GCN-NEXT: s_load_b64 s[0:1], s[0:1], s2 offset:0x0 scale_offset +; GCN-NEXT: s_wait_kmcnt 0x0 +; GCN-NEXT: v_dual_mov_b32 
v0, s0 :: v_dual_mov_b32 v1, s1 +; GCN-NEXT: ; return to shader part epilog +entry: + %idx = load i32, ptr addrspace(4) %p, align 4, !range !0 + %idxprom = zext i32 %idx to i64 + %arrayidx = getelementptr inbounds <2 x float>, ptr addrspace(4) %p, i64 %idxprom + %ret = load <2 x float>, ptr addrspace(4) %arrayidx, align 4 + ret <2 x float> %ret +} + +define amdgpu_ps <3 x float> @s_load_b96_idxprom_range(ptr addrspace(4) align 4 inreg %p) { +; GCN-LABEL: s_load_b96_idxprom_range: +; GCN: ; %bb.0: ; %entry +; GCN-NEXT: s_load_b32 s2, s[0:1], 0x0 +; GCN-NEXT: s_wait_kmcnt 0x0 +; GCN-NEXT: s_load_b96 s[0:2], s[0:1], s2 offset:0x0 scale_offset +; GCN-NEXT: s_wait_kmcnt 0x0 +; GCN-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1 +; GCN-NEXT: v_mov_b32_e32 v2, s2 +; GCN-NEXT: ; return to shader part epilog +entry: + %idx = load i32, ptr addrspace(4) %p, align 4, !range !0 + %idxprom = zext i32 %idx to i64 + %arrayidx = getelementptr inbounds [3 x float], ptr addrspace(4) %p, i64 %idxprom + %ret = load <3 x float>, ptr addrspace(4) %arrayidx, align 4 + ret <3 x float> %ret +} + +define amdgpu_ps <4 x float> @s_load_b128_idxprom_range(ptr addrspace(4) align 4 inreg %p) { +; GCN-LABEL: s_load_b128_idxprom_range: +; GCN: ; %bb.0: ; %entry +; GCN-NEXT: s_load_b32 s2, s[0:1], 0x0 +; GCN-NEXT: s_wait_kmcnt 0x0 +; GCN-NEXT: s_load_b128 s[0:3], s[0:1], s2 offset:0x0 scale_offset +; GCN-NEXT: s_wait_kmcnt 0x0 +; GCN-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1 +; GCN-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3 +; GCN-NEXT: ; return to shader part epilog +entry: + %idx = load i32, ptr addrspace(4) %p, align 4, !range !0 + %idxprom = zext i32 %idx to i64 + %arrayidx = getelementptr inbounds <4 x float>, ptr addrspace(4) %p, i64 %idxprom + %ret = load <4 x float>, ptr addrspace(4) %arrayidx, align 4 + ret <4 x float> %ret +} + +define amdgpu_ps <8 x float> @s_load_b256_idxprom_range(ptr addrspace(4) align 4 inreg %p) { +; GCN-LABEL: s_load_b256_idxprom_range: +; GCN: ; %bb.0: ; %entry +; GCN-NEXT: s_load_b32 s2, s[0:1], 0x0 +; GCN-NEXT: s_wait_kmcnt 0x0 +; GCN-NEXT: s_load_b256 s[0:7], s[0:1], s2 offset:0x0 scale_offset +; GCN-NEXT: s_wait_kmcnt 0x0 +; GCN-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1 +; GCN-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3 +; GCN-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5 +; GCN-NEXT: v_dual_mov_b32 v6, s6 :: v_dual_mov_b32 v7, s7 +; GCN-NEXT: ; return to shader part epilog +entry: + %idx = load i32, ptr addrspace(4) %p, align 4, !range !0 + %idxprom = zext i32 %idx to i64 + %arrayidx = getelementptr inbounds <8 x float>, ptr addrspace(4) %p, i64 %idxprom + %ret = load <8 x float>, ptr addrspace(4) %arrayidx, align 4 + ret <8 x float> %ret +} + +define amdgpu_ps <16 x float> @s_load_b512_idxprom_range(ptr addrspace(4) align 4 inreg %p) { +; GCN-LABEL: s_load_b512_idxprom_range: +; GCN: ; %bb.0: ; %entry +; GCN-NEXT: s_load_b32 s2, s[0:1], 0x0 +; GCN-NEXT: s_wait_kmcnt 0x0 +; GCN-NEXT: s_load_b512 s[0:15], s[0:1], s2 offset:0x0 scale_offset +; GCN-NEXT: s_wait_kmcnt 0x0 +; GCN-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1 +; GCN-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3 +; GCN-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5 +; GCN-NEXT: v_dual_mov_b32 v6, s6 :: v_dual_mov_b32 v7, s7 +; GCN-NEXT: v_dual_mov_b32 v8, s8 :: v_dual_mov_b32 v9, s9 +; GCN-NEXT: v_dual_mov_b32 v10, s10 :: v_dual_mov_b32 v11, s11 +; GCN-NEXT: v_dual_mov_b32 v12, s12 :: v_dual_mov_b32 v13, s13 +; GCN-NEXT: v_dual_mov_b32 v14, s14 :: 
v_dual_mov_b32 v15, s15 +; GCN-NEXT: ; return to shader part epilog +entry: + %idx = load i32, ptr addrspace(4) %p, align 4, !range !0 + %idxprom = zext i32 %idx to i64 + %arrayidx = getelementptr inbounds <16 x float>, ptr addrspace(4) %p, i64 %idxprom + %ret = load <16 x float>, ptr addrspace(4) %arrayidx, align 4 + ret <16 x float> %ret +} + +!0 = !{i32 0, i32 1024} diff --git a/llvm/test/CodeGen/AMDGPU/srem.ll b/llvm/test/CodeGen/AMDGPU/srem.ll index a6b8ea3..6da7d1b 100644 --- a/llvm/test/CodeGen/AMDGPU/srem.ll +++ b/llvm/test/CodeGen/AMDGPU/srem.ll @@ -1819,7 +1819,7 @@ define amdgpu_kernel void @srem_i64(ptr addrspace(1) %out, ptr addrspace(1) %in) ; TAHITI-NEXT: v_mul_hi_u32 v1, v0, v1 ; TAHITI-NEXT: v_mul_lo_u32 v1, v1, v2 ; TAHITI-NEXT: v_sub_i32_e32 v0, vcc, v0, v1 -; TAHITI-NEXT: v_sub_i32_e32 v1, vcc, v0, v2 +; TAHITI-NEXT: v_subrev_i32_e32 v1, vcc, v2, v0 ; TAHITI-NEXT: v_cmp_ge_u32_e32 vcc, v0, v2 ; TAHITI-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc ; TAHITI-NEXT: v_sub_i32_e32 v1, vcc, v0, v2 @@ -6232,7 +6232,7 @@ define amdgpu_kernel void @srem_v4i64(ptr addrspace(1) %out, ptr addrspace(1) %i ; TONGA-NEXT: v_mul_hi_u32 v8, v14, v8 ; TONGA-NEXT: v_mul_lo_u32 v8, v8, v10 ; TONGA-NEXT: v_sub_u32_e32 v8, vcc, v14, v8 -; TONGA-NEXT: v_sub_u32_e32 v9, vcc, v8, v10 +; TONGA-NEXT: v_subrev_u32_e32 v9, vcc, v10, v8 ; TONGA-NEXT: v_cmp_ge_u32_e32 vcc, v8, v10 ; TONGA-NEXT: v_cndmask_b32_e32 v8, v8, v9, vcc ; TONGA-NEXT: v_sub_u32_e32 v9, vcc, v8, v10 diff --git a/llvm/test/CodeGen/AMDGPU/wmma-coececution-valu-hazards.mir b/llvm/test/CodeGen/AMDGPU/wmma-coececution-valu-hazards.mir new file mode 100644 index 0000000..2f7a6e2 --- /dev/null +++ b/llvm/test/CodeGen/AMDGPU/wmma-coececution-valu-hazards.mir @@ -0,0 +1,902 @@ +# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py +# RUN: llc -mtriple=amdgcn -mcpu=gfx1250 -run-pass post-RA-hazard-rec %s -o - | FileCheck -check-prefix=GFX1250 %s + +# WMMA writes: D0, WMMA reads: A0/B0/Index0 +# VALU writes: D1, VALU reads: Use1 +# Hazards could be: +# RAW: D0 overlaps Use1 +# WAW: D0 overlaps D1 +# WAR: A0/B0/Index0 overlaps D1 + +--- +name: test_wmma_f32_16x16x32_bf16_D0_overlaps_Use1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_wmma_f32_16x16x32_bf16_D0_overlaps_Use1 + ; GFX1250: early-clobber $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 = V_WMMA_F32_16X16X32_BF16_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 8, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, 0, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: $vgpr25 = V_ADD_F32_e32 $vgpr24, $vgpr16, implicit $mode, implicit $exec + $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 = V_WMMA_F32_16X16X32_BF16_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 8, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, 0, 0, 0, 0, implicit $exec + $vgpr25 = V_ADD_F32_e32 $vgpr24, $vgpr16, implicit $mode, implicit $exec +...
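The RAW/WAW/WAR taxonomy in the file header maps directly onto register-range overlap checks, and the remaining tests walk through each case. A compact sketch of the classification (freestanding types and names for illustration, not the actual GCNHazardRecognizer interfaces):

    // VGPR ranges as inclusive [First, Last] indices; e.g. the WMMA
    // destination above is v16..v23 and its A operand is v0..v7.
    struct RegRange { unsigned First, Last; };

    static bool overlaps(RegRange A, RegRange B) {
      return A.First <= B.Last && B.First <= A.Last;
    }

    enum class Hazard { None, RAW, WAW, WAR };

    // D0/A0/B0 belong to the WMMA, D1/Use1 to the following VALU, matching
    // the naming in the file header.
    Hazard classify(RegRange D0, RegRange A0, RegRange B0, RegRange D1,
                    RegRange Use1) {
      if (overlaps(D0, Use1)) return Hazard::RAW; // VALU reads the WMMA result
      if (overlaps(D0, D1))   return Hazard::WAW; // VALU rewrites the result
      if (overlaps(A0, D1) || overlaps(B0, D1))
        return Hazard::WAR;                       // VALU clobbers a WMMA source
      return Hazard::None;
    }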
+ +--- +name: test_wmma_f32_16x16x32_bf16_D0_overlaps_Use1_with_4_valus_in_between +body: | + bb.0: + ; GFX1250-LABEL: name: test_wmma_f32_16x16x32_bf16_D0_overlaps_Use1_with_4_valus_in_between + ; GFX1250: early-clobber $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 = V_WMMA_F32_16X16X32_BF16_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 8, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, 0, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr26 = V_MOV_B32_e32 26, implicit $exec + ; GFX1250-NEXT: $vgpr27 = V_MOV_B32_e32 27, implicit $exec + ; GFX1250-NEXT: $vgpr28 = V_MOV_B32_e32 28, implicit $exec + ; GFX1250-NEXT: $vgpr29 = V_MOV_B32_e32 29, implicit $exec + ; GFX1250-NEXT: $vgpr25 = V_ADD_F32_e32 $vgpr24, $vgpr16, implicit $mode, implicit $exec + $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 = V_WMMA_F32_16X16X32_BF16_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 8, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, 0, 0, 0, 0, implicit $exec + $vgpr26 = V_MOV_B32_e32 26, implicit $exec + $vgpr27 = V_MOV_B32_e32 27, implicit $exec + $vgpr28 = V_MOV_B32_e32 28, implicit $exec + $vgpr29 = V_MOV_B32_e32 29, implicit $exec + $vgpr25 = V_ADD_F32_e32 $vgpr24, $vgpr16, implicit $mode, implicit $exec +... + +--- +name: test_wmma_f32_16x16x32_bf16_D0_overlaps_Use1_with_4_salus_in_between +body: | + bb.0: + ; GFX1250-LABEL: name: test_wmma_f32_16x16x32_bf16_D0_overlaps_Use1_with_4_salus_in_between + ; GFX1250: early-clobber $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 = V_WMMA_F32_16X16X32_BF16_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 8, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, 0, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: $sgpr0 = S_MOV_B32 0 + ; GFX1250-NEXT: $sgpr1 = S_MOV_B32 1 + ; GFX1250-NEXT: $sgpr2 = S_MOV_B32 2 + ; GFX1250-NEXT: $sgpr3 = S_MOV_B32 3 + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: $vgpr25 = V_ADD_F32_e32 $vgpr24, $vgpr16, implicit $mode, implicit $exec + $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 = V_WMMA_F32_16X16X32_BF16_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 8, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, 0, 0, 0, 0, implicit $exec + $sgpr0 = S_MOV_B32 0 + $sgpr1 = S_MOV_B32 1 + $sgpr2 = S_MOV_B32 2 + $sgpr3 = S_MOV_B32 3 + $vgpr25 = V_ADD_F32_e32 $vgpr24, $vgpr16, implicit $mode, implicit $exec +... 
+ +--- +name: test_wmma_f32_16x16x32_bf16_D0_overlaps_D1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_wmma_f32_16x16x32_bf16_D0_overlaps_D1 + ; GFX1250: early-clobber $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 = V_WMMA_F32_16X16X32_BF16_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 8, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, 0, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: $vgpr16 = V_ADD_F32_e32 $vgpr24, $vgpr25, implicit $mode, implicit $exec + $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 = V_WMMA_F32_16X16X32_BF16_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 8, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, 0, 0, 0, 0, implicit $exec + $vgpr16 = V_ADD_F32_e32 $vgpr24, $vgpr25, implicit $mode, implicit $exec +... + +--- +name: test_wmma_f32_16x16x32_bf16_A0_overlaps_D1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_wmma_f32_16x16x32_bf16_A0_overlaps_D1 + ; GFX1250: early-clobber $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 = V_WMMA_F32_16X16X32_BF16_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 8, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, 0, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: $vgpr0 = V_ADD_F32_e32 $vgpr24, $vgpr25, implicit $mode, implicit $exec + $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 = V_WMMA_F32_16X16X32_BF16_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 8, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, 0, 0, 0, 0, implicit $exec + $vgpr0 = V_ADD_F32_e32 $vgpr24, $vgpr25, implicit $mode, implicit $exec +... + +--- +name: test_wmma_f32_16x16x32_bf16_B0_overlaps_D1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_wmma_f32_16x16x32_bf16_B0_overlaps_D1 + ; GFX1250: early-clobber $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 = V_WMMA_F32_16X16X32_BF16_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 8, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, 0, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: $vgpr8 = V_ADD_F32_e32 $vgpr24, $vgpr25, implicit $mode, implicit $exec + $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 = V_WMMA_F32_16X16X32_BF16_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 8, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, 0, 0, 0, 0, implicit $exec + $vgpr8 = V_ADD_F32_e32 $vgpr24, $vgpr25, implicit $mode, implicit $exec +... 
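The variants above also pin down the cost of the mitigation: this WMMA needs four wait slots before a dependent VALU, independent VALU instructions fill those slots (four V_MOVs mean zero V_NOPs), and SALU instructions do not (four S_MOVs still leave four V_NOPs). The IU8 cases further down need eight. A one-line model of that bookkeeping, with the name and the slot counts read off the checks rather than the hazard recognizer itself:

    // Slots still owed after the instructions already sitting between the
    // WMMA and its dependent VALU; only VALU instructions count as fillers.
    unsigned nopsNeeded(unsigned RequiredSlots, unsigned ValusSinceWMMA) {
      return RequiredSlots > ValusSinceWMMA ? RequiredSlots - ValusSinceWMMA
                                            : 0;
    }

    // E.g. nopsNeeded(4, 0) == 4 (plain case), nopsNeeded(4, 4) == 0 (four
    // VALU movs in between), while SALU-only fillers leave the count at 4.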
+ +--- +name: test_wmma_f16_16x16x64_fp8_fp8_D0_overlaps_Use1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_wmma_f16_16x16x64_fp8_fp8_D0_overlaps_Use1 + ; GFX1250: early-clobber $vgpr22_vgpr23_vgpr24_vgpr25 = V_WMMA_F16_16X16X64_FP8_FP8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 8, killed $vgpr22_vgpr23_vgpr24_vgpr25, 0, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: $vgpr27 = V_ADD_F32_e32 $vgpr22, $vgpr26, implicit $mode, implicit $exec + $vgpr22_vgpr23_vgpr24_vgpr25 = V_WMMA_F16_16X16X64_FP8_FP8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 8, killed $vgpr22_vgpr23_vgpr24_vgpr25, 0, 0, 0, 0, implicit $exec + $vgpr27 = V_ADD_F32_e32 $vgpr22, $vgpr26, implicit $mode, implicit $exec +... + +--- +name: test_wmma_f16_16x16x64_fp8_fp8_D0_overlaps_Use1_with_4_valus_in_between +body: | + bb.0: + ; GFX1250-LABEL: name: test_wmma_f16_16x16x64_fp8_fp8_D0_overlaps_Use1_with_4_valus_in_between + ; GFX1250: early-clobber $vgpr22_vgpr23_vgpr24_vgpr25 = V_WMMA_F16_16X16X64_FP8_FP8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 8, killed $vgpr22_vgpr23_vgpr24_vgpr25, 0, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr26 = V_MOV_B32_e32 26, implicit $exec + ; GFX1250-NEXT: $vgpr27 = V_MOV_B32_e32 27, implicit $exec + ; GFX1250-NEXT: $vgpr28 = V_MOV_B32_e32 28, implicit $exec + ; GFX1250-NEXT: $vgpr29 = V_MOV_B32_e32 29, implicit $exec + ; GFX1250-NEXT: $vgpr31 = V_ADD_F32_e32 $vgpr22, $vgpr30, implicit $mode, implicit $exec + $vgpr22_vgpr23_vgpr24_vgpr25 = V_WMMA_F16_16X16X64_FP8_FP8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 8, killed $vgpr22_vgpr23_vgpr24_vgpr25, 0, 0, 0, 0, implicit $exec + $vgpr26 = V_MOV_B32_e32 26, implicit $exec + $vgpr27 = V_MOV_B32_e32 27, implicit $exec + $vgpr28 = V_MOV_B32_e32 28, implicit $exec + $vgpr29 = V_MOV_B32_e32 29, implicit $exec + $vgpr31 = V_ADD_F32_e32 $vgpr22, $vgpr30, implicit $mode, implicit $exec +... 
+ +--- +name: test_wmma_f16_16x16x64_fp8_fp8_D0_overlaps_Use1_with_4_salus_in_between +body: | + bb.0: + ; GFX1250-LABEL: name: test_wmma_f16_16x16x64_fp8_fp8_D0_overlaps_Use1_with_4_salus_in_between + ; GFX1250: early-clobber $vgpr22_vgpr23_vgpr24_vgpr25 = V_WMMA_F16_16X16X64_FP8_FP8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 8, killed $vgpr22_vgpr23_vgpr24_vgpr25, 0, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: $sgpr0 = S_MOV_B32 0 + ; GFX1250-NEXT: $sgpr1 = S_MOV_B32 1 + ; GFX1250-NEXT: $sgpr2 = S_MOV_B32 2 + ; GFX1250-NEXT: $sgpr3 = S_MOV_B32 3 + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: $vgpr27 = V_ADD_F32_e32 $vgpr22, $vgpr26, implicit $mode, implicit $exec + $vgpr22_vgpr23_vgpr24_vgpr25 = V_WMMA_F16_16X16X64_FP8_FP8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 8, killed $vgpr22_vgpr23_vgpr24_vgpr25, 0, 0, 0, 0, implicit $exec + $sgpr0 = S_MOV_B32 0 + $sgpr1 = S_MOV_B32 1 + $sgpr2 = S_MOV_B32 2 + $sgpr3 = S_MOV_B32 3 + $vgpr27 = V_ADD_F32_e32 $vgpr22, $vgpr26, implicit $mode, implicit $exec +... + +--- +name: test_wmma_f16_16x16x64_fp8_fp8_D0_overlaps_D1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_wmma_f16_16x16x64_fp8_fp8_D0_overlaps_D1 + ; GFX1250: early-clobber $vgpr22_vgpr23_vgpr24_vgpr25 = V_WMMA_F16_16X16X64_FP8_FP8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 8, killed $vgpr22_vgpr23_vgpr24_vgpr25, 0, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: $vgpr22 = V_ADD_F32_e32 $vgpr26, $vgpr27, implicit $mode, implicit $exec + $vgpr22_vgpr23_vgpr24_vgpr25 = V_WMMA_F16_16X16X64_FP8_FP8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 8, killed $vgpr22_vgpr23_vgpr24_vgpr25, 0, 0, 0, 0, implicit $exec + $vgpr22 = V_ADD_F32_e32 $vgpr26, $vgpr27, implicit $mode, implicit $exec +... + +--- +name: test_wmma_f16_16x16x64_fp8_fp8_A0_overlaps_D1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_wmma_f16_16x16x64_fp8_fp8_A0_overlaps_D1 + ; GFX1250: early-clobber $vgpr22_vgpr23_vgpr24_vgpr25 = V_WMMA_F16_16X16X64_FP8_FP8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 8, killed $vgpr22_vgpr23_vgpr24_vgpr25, 0, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: $vgpr0 = V_ADD_F32_e32 $vgpr26, $vgpr27, implicit $mode, implicit $exec + $vgpr22_vgpr23_vgpr24_vgpr25 = V_WMMA_F16_16X16X64_FP8_FP8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 8, killed $vgpr22_vgpr23_vgpr24_vgpr25, 0, 0, 0, 0, implicit $exec + $vgpr0 = V_ADD_F32_e32 $vgpr26, $vgpr27, implicit $mode, implicit $exec +... 
+ +--- +name: test_wmma_f16_16x16x64_fp8_fp8_B0_overlaps_D1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_wmma_f16_16x16x64_fp8_fp8_B0_overlaps_D1 + ; GFX1250: early-clobber $vgpr22_vgpr23_vgpr24_vgpr25 = V_WMMA_F16_16X16X64_FP8_FP8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 8, killed $vgpr22_vgpr23_vgpr24_vgpr25, 0, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: $vgpr8 = V_ADD_F32_e32 $vgpr26, $vgpr27, implicit $mode, implicit $exec + $vgpr22_vgpr23_vgpr24_vgpr25 = V_WMMA_F16_16X16X64_FP8_FP8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 8, killed $vgpr22_vgpr23_vgpr24_vgpr25, 0, 0, 0, 0, implicit $exec + $vgpr8 = V_ADD_F32_e32 $vgpr26, $vgpr27, implicit $mode, implicit $exec +... + +--- +name: test_wmma_F32_16x16x128_F8F6F4_NoF8_D0_overlaps_Use1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_wmma_F32_16x16x128_F8F6F4_NoF8_D0_overlaps_Use1 + ; GFX1250: early-clobber $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 = V_WMMA_F32_16X16X128_F8F6F4_f8_f8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, 8, killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, 2, 2, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: $vgpr48 = V_ADD_F32_e32 $vgpr32, $vgpr47, implicit $mode, implicit $exec + $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 = V_WMMA_F32_16X16X128_F8F6F4_f8_f8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, 8, killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, 2, 2, 0, 0, implicit $exec + $vgpr48 = V_ADD_F32_e32 $vgpr32, $vgpr47, implicit $mode, implicit $exec +... 
+ +--- +name: test_wmma_F32_16x16x128_F8F6F4_NoF8_D0_overlaps_Use1_with_4_valus_in_between +body: | + bb.0: + ; GFX1250-LABEL: name: test_wmma_F32_16x16x128_F8F6F4_NoF8_D0_overlaps_Use1_with_4_valus_in_between + ; GFX1250: early-clobber $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 = V_WMMA_F32_16X16X128_F8F6F4_f8_f8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, 8, killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, 2, 2, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr40 = V_MOV_B32_e32 40, implicit $exec + ; GFX1250-NEXT: $vgpr41 = V_MOV_B32_e32 41, implicit $exec + ; GFX1250-NEXT: $vgpr42 = V_MOV_B32_e32 42, implicit $exec + ; GFX1250-NEXT: $vgpr43 = V_MOV_B32_e32 43, implicit $exec + ; GFX1250-NEXT: $vgpr48 = V_ADD_F32_e32 $vgpr32, $vgpr47, implicit $mode, implicit $exec + $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 = V_WMMA_F32_16X16X128_F8F6F4_f8_f8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, 8, killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, 2, 2, 0, 0, implicit $exec + $vgpr40 = V_MOV_B32_e32 40, implicit $exec + $vgpr41 = V_MOV_B32_e32 41, implicit $exec + $vgpr42 = V_MOV_B32_e32 42, implicit $exec + $vgpr43 = V_MOV_B32_e32 43, implicit $exec + $vgpr48 = V_ADD_F32_e32 $vgpr32, $vgpr47, implicit $mode, implicit $exec +... + +--- +name: test_wmma_F32_16x16x128_F8F6F4_NoF8_D0_overlaps_Use1_with_4_salus_in_between +body: | + bb.0: + ; GFX1250-LABEL: name: test_wmma_F32_16x16x128_F8F6F4_NoF8_D0_overlaps_Use1_with_4_salus_in_between + ; GFX1250: early-clobber $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 = V_WMMA_F32_16X16X128_F8F6F4_f8_f8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, 8, killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, 2, 2, 0, 0, implicit $exec + ; GFX1250-NEXT: $sgpr0 = S_MOV_B32 0 + ; GFX1250-NEXT: $sgpr1 = S_MOV_B32 1 + ; GFX1250-NEXT: $sgpr2 = S_MOV_B32 2 + ; GFX1250-NEXT: $sgpr3 = S_MOV_B32 3 + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: $vgpr48 = V_ADD_F32_e32 $vgpr32, $vgpr47, implicit $mode, implicit $exec + $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 = V_WMMA_F32_16X16X128_F8F6F4_f8_f8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, 8, killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, 2, 2, 0, 0, implicit $exec + $sgpr0 = S_MOV_B32 0 + $sgpr1 = S_MOV_B32 1 + $sgpr2 = S_MOV_B32 2 + $sgpr3 = S_MOV_B32 3 + $vgpr48 = V_ADD_F32_e32 $vgpr32, $vgpr47, implicit $mode, implicit $exec +... 
+ +--- +name: test_wmma_F32_16x16x128_F8F6F4_NoF8_D0_overlaps_D1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_wmma_F32_16x16x128_F8F6F4_NoF8_D0_overlaps_D1 + ; GFX1250: early-clobber $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 = V_WMMA_F32_16X16X128_F8F6F4_f8_f8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, 8, killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, 2, 2, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: $vgpr32 = V_ADD_F32_e32 $vgpr47, $vgpr48, implicit $mode, implicit $exec + $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 = V_WMMA_F32_16X16X128_F8F6F4_f8_f8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, 8, killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, 2, 2, 0, 0, implicit $exec + $vgpr32 = V_ADD_F32_e32 $vgpr47, $vgpr48, implicit $mode, implicit $exec +... + +--- +name: test_wmma_F32_16x16x128_F8F6F4_NoF8_A0_overlaps_D1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_wmma_F32_16x16x128_F8F6F4_NoF8_A0_overlaps_D1 + ; GFX1250: early-clobber $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 = V_WMMA_F32_16X16X128_F8F6F4_f8_f8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, 8, killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, 2, 2, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: $vgpr0 = V_ADD_F32_e32 $vgpr47, $vgpr48, implicit $mode, implicit $exec + $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 = V_WMMA_F32_16X16X128_F8F6F4_f8_f8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, 8, killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, 2, 2, 0, 0, implicit $exec + $vgpr0 = V_ADD_F32_e32 $vgpr47, $vgpr48, implicit $mode, implicit $exec +... 
+ +--- +name: test_wmma_F32_16x16x128_F8F6F4_NoF8_B0_overlaps_D1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_wmma_F32_16x16x128_F8F6F4_NoF8_B0_overlaps_D1 + ; GFX1250: early-clobber $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 = V_WMMA_F32_16X16X128_F8F6F4_f8_f8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, 8, killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, 2, 2, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: $vgpr16 = V_ADD_F32_e32 $vgpr47, $vgpr48, implicit $mode, implicit $exec + $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 = V_WMMA_F32_16X16X128_F8F6F4_f8_f8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, 8, killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, 2, 2, 0, 0, implicit $exec + $vgpr16 = V_ADD_F32_e32 $vgpr47, $vgpr48, implicit $mode, implicit $exec +... + +--- +name: test_wmma_I32_16x16x64_IU8_D0_overlaps_Use1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_wmma_I32_16x16x64_IU8_D0_overlaps_Use1 + ; GFX1250: early-clobber $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 = V_WMMA_I32_16X16X64_IU8_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: $vgpr32 = V_ADD_F32_e32 $vgpr16, $vgpr33, implicit $mode, implicit $exec + $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 = V_WMMA_I32_16X16X64_IU8_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, 0, 0, 0, implicit $exec + $vgpr32 = V_ADD_F32_e32 $vgpr16, $vgpr33, implicit $mode, implicit $exec +... 
+ +--- +name: test_wmma_I32_16x16x64_IU8_D0_overlaps_Use1_with_8_valus_in_between +body: | + bb.0: + ; GFX1250-LABEL: name: test_wmma_I32_16x16x64_IU8_D0_overlaps_Use1_with_8_valus_in_between + ; GFX1250: early-clobber $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 = V_WMMA_I32_16X16X64_IU8_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr40 = V_MOV_B32_e32 40, implicit $exec + ; GFX1250-NEXT: $vgpr41 = V_MOV_B32_e32 41, implicit $exec + ; GFX1250-NEXT: $vgpr42 = V_MOV_B32_e32 42, implicit $exec + ; GFX1250-NEXT: $vgpr43 = V_MOV_B32_e32 43, implicit $exec + ; GFX1250-NEXT: $vgpr44 = V_MOV_B32_e32 44, implicit $exec + ; GFX1250-NEXT: $vgpr45 = V_MOV_B32_e32 45, implicit $exec + ; GFX1250-NEXT: $vgpr46 = V_MOV_B32_e32 46, implicit $exec + ; GFX1250-NEXT: $vgpr47 = V_MOV_B32_e32 47, implicit $exec + ; GFX1250-NEXT: $vgpr32 = V_ADD_F32_e32 $vgpr16, $vgpr33, implicit $mode, implicit $exec + $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 = V_WMMA_I32_16X16X64_IU8_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, 0, 0, 0, implicit $exec + $vgpr40 = V_MOV_B32_e32 40, implicit $exec + $vgpr41 = V_MOV_B32_e32 41, implicit $exec + $vgpr42 = V_MOV_B32_e32 42, implicit $exec + $vgpr43 = V_MOV_B32_e32 43, implicit $exec + $vgpr44 = V_MOV_B32_e32 44, implicit $exec + $vgpr45 = V_MOV_B32_e32 45, implicit $exec + $vgpr46 = V_MOV_B32_e32 46, implicit $exec + $vgpr47 = V_MOV_B32_e32 47, implicit $exec + $vgpr32 = V_ADD_F32_e32 $vgpr16, $vgpr33, implicit $mode, implicit $exec +... 
+ +--- +name: test_wmma_I32_16x16x64_IU8_D0_overlaps_Use1_with_8_salus_in_between +body: | + bb.0: + ; GFX1250-LABEL: name: test_wmma_I32_16x16x64_IU8_D0_overlaps_Use1_with_8_salus_in_between + ; GFX1250: early-clobber $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 = V_WMMA_I32_16X16X64_IU8_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: $sgpr0 = S_MOV_B32 0 + ; GFX1250-NEXT: $sgpr1 = S_MOV_B32 1 + ; GFX1250-NEXT: $sgpr2 = S_MOV_B32 2 + ; GFX1250-NEXT: $sgpr3 = S_MOV_B32 3 + ; GFX1250-NEXT: $sgpr4 = S_MOV_B32 4 + ; GFX1250-NEXT: $sgpr5 = S_MOV_B32 5 + ; GFX1250-NEXT: $sgpr6 = S_MOV_B32 6 + ; GFX1250-NEXT: $sgpr7 = S_MOV_B32 7 + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: $vgpr32 = V_ADD_F32_e32 $vgpr16, $vgpr33, implicit $mode, implicit $exec + $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 = V_WMMA_I32_16X16X64_IU8_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, 0, 0, 0, implicit $exec + $sgpr0 = S_MOV_B32 0 + $sgpr1 = S_MOV_B32 1 + $sgpr2 = S_MOV_B32 2 + $sgpr3 = S_MOV_B32 3 + $sgpr4 = S_MOV_B32 4 + $sgpr5 = S_MOV_B32 5 + $sgpr6 = S_MOV_B32 6 + $sgpr7 = S_MOV_B32 7 + $vgpr32 = V_ADD_F32_e32 $vgpr16, $vgpr33, implicit $mode, implicit $exec +... + +--- +name: test_wmma_I32_16x16x64_IU8_D0_overlaps_D1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_wmma_I32_16x16x64_IU8_D0_overlaps_D1 + ; GFX1250: early-clobber $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 = V_WMMA_I32_16X16X64_IU8_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: $vgpr16 = V_ADD_F32_e32 $vgpr32, $vgpr33, implicit $mode, implicit $exec + $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 = V_WMMA_I32_16X16X64_IU8_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, 0, 0, 0, implicit $exec + $vgpr16 = V_ADD_F32_e32 $vgpr32, $vgpr33, implicit $mode, implicit $exec +... 
+ +--- +name: test_wmma_I32_16x16x64_IU8_A0_overlaps_D1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_wmma_I32_16x16x64_IU8_A0_overlaps_D1 + ; GFX1250: early-clobber $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 = V_WMMA_I32_16X16X64_IU8_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: $vgpr0 = V_ADD_F32_e32 $vgpr32, $vgpr33, implicit $mode, implicit $exec + $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 = V_WMMA_I32_16X16X64_IU8_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, 0, 0, 0, implicit $exec + $vgpr0 = V_ADD_F32_e32 $vgpr32, $vgpr33, implicit $mode, implicit $exec +... + +--- +name: test_wmma_I32_16x16x64_IU8_B0_overlaps_D1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_wmma_I32_16x16x64_IU8_B0_overlaps_D1 + ; GFX1250: early-clobber $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 = V_WMMA_I32_16X16X64_IU8_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: $vgpr8 = V_ADD_F32_e32 $vgpr32, $vgpr33, implicit $mode, implicit $exec + $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 = V_WMMA_I32_16X16X64_IU8_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, 0, 0, 0, implicit $exec + $vgpr8 = V_ADD_F32_e32 $vgpr32, $vgpr33, implicit $mode, implicit $exec +... 
+ +--- +name: test_wmma_F32_16x16x128_F8F6F4_F8_D0_overlaps_Use1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_wmma_F32_16x16x128_F8F6F4_F8_D0_overlaps_Use1 + ; GFX1250: early-clobber $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 = V_WMMA_F32_16X16X128_F8F6F4_f8_f8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, 8, killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, 0, 2, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: $vgpr48 = V_ADD_F32_e32 $vgpr32, $vgpr47, implicit $mode, implicit $exec + $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 = V_WMMA_F32_16X16X128_F8F6F4_f8_f8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, 8, killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, 0, 2, 0, 0, implicit $exec + $vgpr48 = V_ADD_F32_e32 $vgpr32, $vgpr47, implicit $mode, implicit $exec +... + +--- +name: test_wmma_F32_16x16x128_F8F6F4_F8_D0_overlaps_Use1_with_8_valus_in_between +body: | + bb.0: + ; GFX1250-LABEL: name: test_wmma_F32_16x16x128_F8F6F4_F8_D0_overlaps_Use1_with_8_valus_in_between + ; GFX1250: early-clobber $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 = V_WMMA_F32_16X16X128_F8F6F4_f8_f8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, 8, killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, 0, 2, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr40 = V_MOV_B32_e32 40, implicit $exec + ; GFX1250-NEXT: $vgpr41 = V_MOV_B32_e32 41, implicit $exec + ; GFX1250-NEXT: $vgpr42 = V_MOV_B32_e32 42, implicit $exec + ; GFX1250-NEXT: $vgpr43 = V_MOV_B32_e32 43, implicit $exec + ; GFX1250-NEXT: $vgpr44 = V_MOV_B32_e32 44, implicit $exec + ; GFX1250-NEXT: $vgpr45 = V_MOV_B32_e32 45, implicit $exec + ; GFX1250-NEXT: $vgpr46 = V_MOV_B32_e32 46, implicit $exec + ; GFX1250-NEXT: $vgpr47 = V_MOV_B32_e32 47, implicit $exec + ; GFX1250-NEXT: $vgpr49 = V_ADD_F32_e32 $vgpr32, $vgpr48, implicit $mode, implicit $exec + $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 = V_WMMA_F32_16X16X128_F8F6F4_f8_f8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, 8, killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, 0, 2, 0, 0, implicit $exec + $vgpr40 = V_MOV_B32_e32 40, implicit $exec + $vgpr41 = V_MOV_B32_e32 41, implicit $exec + $vgpr42 = V_MOV_B32_e32 42, implicit $exec + $vgpr43 = V_MOV_B32_e32 43, implicit $exec + $vgpr44 = V_MOV_B32_e32 44, implicit $exec + $vgpr45 = V_MOV_B32_e32 45, implicit $exec + $vgpr46 = V_MOV_B32_e32 46, implicit 
$exec + $vgpr47 = V_MOV_B32_e32 47, implicit $exec + $vgpr49 = V_ADD_F32_e32 $vgpr32, $vgpr48, implicit $mode, implicit $exec +... + +--- +name: test_wmma_F32_16x16x128_F8F6F4_F8_D0_overlaps_Use1_with_8_salus_in_between +body: | + bb.0: + ; GFX1250-LABEL: name: test_wmma_F32_16x16x128_F8F6F4_F8_D0_overlaps_Use1_with_8_salus_in_between + ; GFX1250: early-clobber $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 = V_WMMA_F32_16X16X128_F8F6F4_f8_f8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, 8, killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, 0, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: $sgpr0 = S_MOV_B32 0 + ; GFX1250-NEXT: $sgpr1 = S_MOV_B32 1 + ; GFX1250-NEXT: $sgpr2 = S_MOV_B32 2 + ; GFX1250-NEXT: $sgpr3 = S_MOV_B32 3 + ; GFX1250-NEXT: $sgpr4 = S_MOV_B32 4 + ; GFX1250-NEXT: $sgpr5 = S_MOV_B32 5 + ; GFX1250-NEXT: $sgpr6 = S_MOV_B32 6 + ; GFX1250-NEXT: $sgpr7 = S_MOV_B32 7 + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: $vgpr48 = V_ADD_F32_e32 $vgpr32, $vgpr47, implicit $mode, implicit $exec + $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 = V_WMMA_F32_16X16X128_F8F6F4_f8_f8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, 8, killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, 0, 0, 0, 0, implicit $exec + $sgpr0 = S_MOV_B32 0 + $sgpr1 = S_MOV_B32 1 + $sgpr2 = S_MOV_B32 2 + $sgpr3 = S_MOV_B32 3 + $sgpr4 = S_MOV_B32 4 + $sgpr5 = S_MOV_B32 5 + $sgpr6 = S_MOV_B32 6 + $sgpr7 = S_MOV_B32 7 + $vgpr48 = V_ADD_F32_e32 $vgpr32, $vgpr47, implicit $mode, implicit $exec +... 
+ +--- +name: test_wmma_F32_16x16x128_F8F6F4_F8_D0_overlaps_D1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_wmma_F32_16x16x128_F8F6F4_F8_D0_overlaps_D1 + ; GFX1250: early-clobber $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 = V_WMMA_F32_16X16X128_F8F6F4_f8_f8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, 8, killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, 0, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: $vgpr32 = V_ADD_F32_e32 $vgpr47, $vgpr48, implicit $mode, implicit $exec + $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 = V_WMMA_F32_16X16X128_F8F6F4_f8_f8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, 8, killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, 0, 0, 0, 0, implicit $exec + $vgpr32 = V_ADD_F32_e32 $vgpr47, $vgpr48, implicit $mode, implicit $exec +... + +--- +name: test_wmma_F32_16x16x128_F8F6F4_F8_A0_overlaps_D1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_wmma_F32_16x16x128_F8F6F4_F8_A0_overlaps_D1 + ; GFX1250: early-clobber $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 = V_WMMA_F32_16X16X128_F8F6F4_f8_f8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, 8, killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, 0, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: $vgpr0 = V_ADD_F32_e32 $vgpr47, $vgpr48, implicit $mode, implicit $exec + $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 = V_WMMA_F32_16X16X128_F8F6F4_f8_f8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, 8, killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, 0, 0, 0, 0, implicit $exec + $vgpr0 = V_ADD_F32_e32 $vgpr47, $vgpr48, implicit $mode, implicit $exec +... 
+ +--- +name: test_wmma_F32_16x16x128_F8F6F4_F8_B0_overlaps_D1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_wmma_F32_16x16x128_F8F6F4_F8_B0_overlaps_D1 + ; GFX1250: early-clobber $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 = V_WMMA_F32_16X16X128_F8F6F4_f8_f8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, 8, killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, 0, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: $vgpr16 = V_ADD_F32_e32 $vgpr47, $vgpr48, implicit $mode, implicit $exec + $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 = V_WMMA_F32_16X16X128_F8F6F4_f8_f8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, 8, killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, 0, 0, 0, 0, implicit $exec + $vgpr16 = V_ADD_F32_e32 $vgpr47, $vgpr48, implicit $mode, implicit $exec +... + +--- +name: test_swmmac_f32_16x16x64_bf16_D0_overlaps_Use1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_swmmac_f32_16x16x64_bf16_D0_overlaps_Use1 + ; GFX1250: early-clobber $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 = V_SWMMAC_F32_16X16X64_BF16_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, killed $vgpr32, 0, 0, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: $vgpr34 = V_ADD_F32_e32 $vgpr24, $vgpr33, implicit $mode, implicit $exec + $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 = V_SWMMAC_F32_16X16X64_BF16_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, killed $vgpr32, 0, 0, 0, 0, 0, implicit $exec + $vgpr34 = V_ADD_F32_e32 $vgpr24, $vgpr33, implicit $mode, implicit $exec +... 
+ +--- +name: test_swmmac_f32_16x16x64_bf16_D0_overlaps_Use1_with_2_valus_in_between +body: | + bb.0: + ; GFX1250-LABEL: name: test_swmmac_f32_16x16x64_bf16_D0_overlaps_Use1_with_2_valus_in_between + ; GFX1250: early-clobber $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 = V_SWMMAC_F32_16X16X64_BF16_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, killed $vgpr32, 0, 0, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr40 = V_MOV_B32_e32 40, implicit $exec + ; GFX1250-NEXT: $vgpr41 = V_MOV_B32_e32 41, implicit $exec + ; GFX1250-NEXT: $vgpr34 = V_ADD_F32_e32 $vgpr24, $vgpr33, implicit $mode, implicit $exec + $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 = V_SWMMAC_F32_16X16X64_BF16_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, killed $vgpr32, 0, 0, 0, 0, 0, implicit $exec + $vgpr40 = V_MOV_B32_e32 40, implicit $exec + $vgpr41 = V_MOV_B32_e32 41, implicit $exec + $vgpr34 = V_ADD_F32_e32 $vgpr24, $vgpr33, implicit $mode, implicit $exec +... + +--- +name: test_swmmac_f32_16x16x64_bf16_D0_overlaps_Use1_with_2_salus_in_between +body: | + bb.0: + ; GFX1250-LABEL: name: test_swmmac_f32_16x16x64_bf16_D0_overlaps_Use1_with_2_salus_in_between + ; GFX1250: early-clobber $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 = V_SWMMAC_F32_16X16X64_BF16_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, killed $vgpr32, 0, 0, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: $sgpr0 = S_MOV_B32 0 + ; GFX1250-NEXT: $sgpr1 = S_MOV_B32 1 + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: $vgpr34 = V_ADD_F32_e32 $vgpr24, $vgpr33, implicit $mode, implicit $exec + $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 = V_SWMMAC_F32_16X16X64_BF16_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, killed $vgpr32, 0, 0, 0, 0, 0, implicit $exec + $sgpr0 = S_MOV_B32 0 + $sgpr1 = S_MOV_B32 1 + $vgpr34 = V_ADD_F32_e32 $vgpr24, $vgpr33, implicit $mode, implicit $exec +... 
+ +--- +name: test_swmmac_f32_16x16x64_bf16_D0_overlaps_D1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_swmmac_f32_16x16x64_bf16_D0_overlaps_D1 + ; GFX1250: early-clobber $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 = V_SWMMAC_F32_16X16X64_BF16_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, killed $vgpr32, 0, 0, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: $vgpr24 = V_ADD_F32_e32 $vgpr33, $vgpr34, implicit $mode, implicit $exec + $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 = V_SWMMAC_F32_16X16X64_BF16_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, killed $vgpr32, 0, 0, 0, 0, 0, implicit $exec + $vgpr24 = V_ADD_F32_e32 $vgpr33, $vgpr34, implicit $mode, implicit $exec +... + +--- +name: test_swmmac_f32_16x16x64_bf16_A0_overlaps_D1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_swmmac_f32_16x16x64_bf16_A0_overlaps_D1 + ; GFX1250: early-clobber $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 = V_SWMMAC_F32_16X16X64_BF16_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, killed $vgpr32, 0, 0, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: $vgpr0 = V_ADD_F32_e32 $vgpr33, $vgpr34, implicit $mode, implicit $exec + $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 = V_SWMMAC_F32_16X16X64_BF16_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, killed $vgpr32, 0, 0, 0, 0, 0, implicit $exec + $vgpr0 = V_ADD_F32_e32 $vgpr33, $vgpr34, implicit $mode, implicit $exec +... 
+ +--- +name: test_swmmac_f32_16x16x64_bf16_B0_overlaps_D1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_swmmac_f32_16x16x64_bf16_B0_overlaps_D1 + ; GFX1250: early-clobber $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 = V_SWMMAC_F32_16X16X64_BF16_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, killed $vgpr32, 0, 0, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: $vgpr8 = V_ADD_F32_e32 $vgpr33, $vgpr34, implicit $mode, implicit $exec + $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 = V_SWMMAC_F32_16X16X64_BF16_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, killed $vgpr32, 0, 0, 0, 0, 0, implicit $exec + $vgpr8 = V_ADD_F32_e32 $vgpr33, $vgpr34, implicit $mode, implicit $exec +... + +--- +name: test_swmmac_f32_16x16x64_bf16_Index0_overlaps_D1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_swmmac_f32_16x16x64_bf16_Index0_overlaps_D1 + ; GFX1250: early-clobber $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 = V_SWMMAC_F32_16X16X64_BF16_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, killed $vgpr32, 0, 0, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: $vgpr32 = V_ADD_F32_e32 $vgpr33, $vgpr34, implicit $mode, implicit $exec + $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 = V_SWMMAC_F32_16X16X64_BF16_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, killed $vgpr32, 0, 0, 0, 0, 0, implicit $exec + $vgpr32 = V_ADD_F32_e32 $vgpr33, $vgpr34, implicit $mode, implicit $exec +... + +--- +name: test_swmmac_f16_16x16x128_bf8_fp8_D0_overlaps_Use1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_swmmac_f16_16x16x128_bf8_fp8_D0_overlaps_Use1 + ; GFX1250: early-clobber $vgpr24_vgpr25_vgpr26_vgpr27 = V_SWMMAC_F16_16X16X128_BF8_FP8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr24_vgpr25_vgpr26_vgpr27, killed $vgpr28_vgpr29, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: $vgpr31 = V_ADD_F32_e32 $vgpr24, $vgpr30, implicit $mode, implicit $exec + $vgpr24_vgpr25_vgpr26_vgpr27 = V_SWMMAC_F16_16X16X128_BF8_FP8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr24_vgpr25_vgpr26_vgpr27, killed $vgpr28_vgpr29, 0, 0, 0, implicit $exec + $vgpr31 = V_ADD_F32_e32 $vgpr24, $vgpr30, implicit $mode, implicit $exec +... 
+ +--- +name: test_swmmac_f16_16x16x128_bf8_fp8_D0_overlaps_Use1_with_2_valus_in_between +body: | + bb.0: + ; GFX1250-LABEL: name: test_swmmac_f16_16x16x128_bf8_fp8_D0_overlaps_Use1_with_2_valus_in_between + ; GFX1250: early-clobber $vgpr24_vgpr25_vgpr26_vgpr27 = V_SWMMAC_F16_16X16X128_BF8_FP8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr24_vgpr25_vgpr26_vgpr27, killed $vgpr28_vgpr29, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr40 = V_MOV_B32_e32 40, implicit $exec + ; GFX1250-NEXT: $vgpr41 = V_MOV_B32_e32 41, implicit $exec + ; GFX1250-NEXT: $vgpr31 = V_ADD_F32_e32 $vgpr24, $vgpr30, implicit $mode, implicit $exec + $vgpr24_vgpr25_vgpr26_vgpr27 = V_SWMMAC_F16_16X16X128_BF8_FP8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr24_vgpr25_vgpr26_vgpr27, killed $vgpr28_vgpr29, 0, 0, 0, implicit $exec + $vgpr40 = V_MOV_B32_e32 40, implicit $exec + $vgpr41 = V_MOV_B32_e32 41, implicit $exec + $vgpr31 = V_ADD_F32_e32 $vgpr24, $vgpr30, implicit $mode, implicit $exec +... + +--- +name: test_swmmac_f16_16x16x128_bf8_fp8_D0_overlaps_Use1_with_2_salus_in_between +body: | + bb.0: + ; GFX1250-LABEL: name: test_swmmac_f16_16x16x128_bf8_fp8_D0_overlaps_Use1_with_2_salus_in_between + ; GFX1250: early-clobber $vgpr24_vgpr25_vgpr26_vgpr27 = V_SWMMAC_F16_16X16X128_BF8_FP8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr24_vgpr25_vgpr26_vgpr27, killed $vgpr28_vgpr29, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: $sgpr0 = S_MOV_B32 0 + ; GFX1250-NEXT: $sgpr1 = S_MOV_B32 1 + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: $vgpr31 = V_ADD_F32_e32 $vgpr24, $vgpr30, implicit $mode, implicit $exec + $vgpr24_vgpr25_vgpr26_vgpr27 = V_SWMMAC_F16_16X16X128_BF8_FP8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr24_vgpr25_vgpr26_vgpr27, killed $vgpr28_vgpr29, 0, 0, 0, implicit $exec + $sgpr0 = S_MOV_B32 0 + $sgpr1 = S_MOV_B32 1 + $vgpr31 = V_ADD_F32_e32 $vgpr24, $vgpr30, implicit $mode, implicit $exec +... 
+ +--- +name: test_swmmac_f16_16x16x128_bf8_fp8_D0_overlaps_D1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_swmmac_f16_16x16x128_bf8_fp8_D0_overlaps_D1 + ; GFX1250: early-clobber $vgpr24_vgpr25_vgpr26_vgpr27 = V_SWMMAC_F16_16X16X128_BF8_FP8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr24_vgpr25_vgpr26_vgpr27, killed $vgpr28_vgpr29, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: $vgpr24 = V_ADD_F32_e32 $vgpr30, $vgpr31, implicit $mode, implicit $exec + $vgpr24_vgpr25_vgpr26_vgpr27 = V_SWMMAC_F16_16X16X128_BF8_FP8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr24_vgpr25_vgpr26_vgpr27, killed $vgpr28_vgpr29, 0, 0, 0, implicit $exec + $vgpr24 = V_ADD_F32_e32 $vgpr30, $vgpr31, implicit $mode, implicit $exec +... + +--- +name: test_swmmac_f16_16x16x128_bf8_fp8_A0_overlaps_D1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_swmmac_f16_16x16x128_bf8_fp8_A0_overlaps_D1 + ; GFX1250: early-clobber $vgpr24_vgpr25_vgpr26_vgpr27 = V_SWMMAC_F16_16X16X128_BF8_FP8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr24_vgpr25_vgpr26_vgpr27, killed $vgpr28_vgpr29, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: $vgpr0 = V_ADD_F32_e32 $vgpr30, $vgpr31, implicit $mode, implicit $exec + $vgpr24_vgpr25_vgpr26_vgpr27 = V_SWMMAC_F16_16X16X128_BF8_FP8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr24_vgpr25_vgpr26_vgpr27, killed $vgpr28_vgpr29, 0, 0, 0, implicit $exec + $vgpr0 = V_ADD_F32_e32 $vgpr30, $vgpr31, implicit $mode, implicit $exec +... + +--- +name: test_swmmac_f16_16x16x128_bf8_fp8_B0_overlaps_D1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_swmmac_f16_16x16x128_bf8_fp8_B0_overlaps_D1 + ; GFX1250: early-clobber $vgpr24_vgpr25_vgpr26_vgpr27 = V_SWMMAC_F16_16X16X128_BF8_FP8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr24_vgpr25_vgpr26_vgpr27, killed $vgpr28_vgpr29, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: $vgpr8 = V_ADD_F32_e32 $vgpr30, $vgpr31, implicit $mode, implicit $exec + $vgpr24_vgpr25_vgpr26_vgpr27 = V_SWMMAC_F16_16X16X128_BF8_FP8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr24_vgpr25_vgpr26_vgpr27, killed $vgpr28_vgpr29, 0, 0, 0, implicit $exec + $vgpr8 = V_ADD_F32_e32 $vgpr30, $vgpr31, implicit $mode, implicit $exec +... 
+ +--- +name: test_swmmac_f16_16x16x128_bf8_fp8_Index0_overlaps_D1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_swmmac_f16_16x16x128_bf8_fp8_Index0_overlaps_D1 + ; GFX1250: early-clobber $vgpr24_vgpr25_vgpr26_vgpr27 = V_SWMMAC_F16_16X16X128_BF8_FP8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr24_vgpr25_vgpr26_vgpr27, killed $vgpr28_vgpr29, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: $vgpr28 = V_ADD_F32_e32 $vgpr30, $vgpr31, implicit $mode, implicit $exec + $vgpr24_vgpr25_vgpr26_vgpr27 = V_SWMMAC_F16_16X16X128_BF8_FP8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr24_vgpr25_vgpr26_vgpr27, killed $vgpr28_vgpr29, 0, 0, 0, implicit $exec + $vgpr28 = V_ADD_F32_e32 $vgpr30, $vgpr31, implicit $mode, implicit $exec +... + +--- +name: test_swmmac_i32_16x16x128_iu8_D0_overlaps_Use1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_swmmac_i32_16x16x128_iu8_D0_overlaps_Use1 + ; GFX1250: early-clobber $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 = V_SWMMAC_I32_16X16X128_IU8_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, killed $vgpr32_vgpr33, 0, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: $vgpr34 = V_ADD_F32_e32 $vgpr33, $vgpr24, implicit $mode, implicit $exec + $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 = V_SWMMAC_I32_16X16X128_IU8_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, killed $vgpr32_vgpr33, 0, 0, 0, 0, implicit $exec + $vgpr34 = V_ADD_F32_e32 $vgpr33, $vgpr24, implicit $mode, implicit $exec +... 
+ +--- +name: test_swmmac_i32_16x16x128_iu8_D0_overlaps_Use1_with_4_valus_in_between +body: | + bb.0: + ; GFX1250-LABEL: name: test_swmmac_i32_16x16x128_iu8_D0_overlaps_Use1_with_4_valus_in_between + ; GFX1250: early-clobber $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 = V_SWMMAC_I32_16X16X128_IU8_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, killed $vgpr32_vgpr33, 0, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr40 = V_MOV_B32_e32 40, implicit $exec + ; GFX1250-NEXT: $vgpr41 = V_MOV_B32_e32 41, implicit $exec + ; GFX1250-NEXT: $vgpr42 = V_MOV_B32_e32 42, implicit $exec + ; GFX1250-NEXT: $vgpr43 = V_MOV_B32_e32 43, implicit $exec + ; GFX1250-NEXT: $vgpr34 = V_ADD_F32_e32 $vgpr34, $vgpr24, implicit $mode, implicit $exec + $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 = V_SWMMAC_I32_16X16X128_IU8_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, killed $vgpr32_vgpr33, 0, 0, 0, 0, implicit $exec + $vgpr40 = V_MOV_B32_e32 40, implicit $exec + $vgpr41 = V_MOV_B32_e32 41, implicit $exec + $vgpr42 = V_MOV_B32_e32 42, implicit $exec + $vgpr43 = V_MOV_B32_e32 43, implicit $exec + $vgpr34 = V_ADD_F32_e32 $vgpr34, $vgpr24, implicit $mode, implicit $exec +... + +--- +name: test_swmmac_i32_16x16x128_iu8_D0_overlaps_Use1_with_4_salus_in_between +body: | + bb.0: + ; GFX1250-LABEL: name: test_swmmac_i32_16x16x128_iu8_D0_overlaps_Use1_with_4_salus_in_between + ; GFX1250: early-clobber $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 = V_SWMMAC_I32_16X16X128_IU8_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, killed $vgpr32_vgpr33, 0, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: $sgpr0 = S_MOV_B32 0 + ; GFX1250-NEXT: $sgpr1 = S_MOV_B32 1 + ; GFX1250-NEXT: $sgpr2 = S_MOV_B32 2 + ; GFX1250-NEXT: $sgpr3 = S_MOV_B32 4 + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: $vgpr34 = V_ADD_F32_e32 $vgpr34, $vgpr24, implicit $mode, implicit $exec + $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 = V_SWMMAC_I32_16X16X128_IU8_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, killed $vgpr32_vgpr33, 0, 0, 0, 0, implicit $exec + $sgpr0 = S_MOV_B32 0 + $sgpr1 = S_MOV_B32 1 + $sgpr2 = S_MOV_B32 2 + $sgpr3 = S_MOV_B32 4 + $vgpr34 = V_ADD_F32_e32 $vgpr34, $vgpr24, implicit $mode, implicit $exec +... 
+ +--- +name: test_swmmac_i32_16x16x128_iu8_D0_overlaps_D1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_swmmac_i32_16x16x128_iu8_D0_overlaps_D1 + ; GFX1250: early-clobber $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 = V_SWMMAC_I32_16X16X128_IU8_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, killed $vgpr32_vgpr33, 0, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: $vgpr24 = V_ADD_F32_e32 $vgpr34, $vgpr35, implicit $mode, implicit $exec + $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 = V_SWMMAC_I32_16X16X128_IU8_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, killed $vgpr32_vgpr33, 0, 0, 0, 0, implicit $exec + $vgpr24 = V_ADD_F32_e32 $vgpr34, $vgpr35, implicit $mode, implicit $exec +... + +--- +name: test_swmmac_i32_16x16x128_iu8_A0_overlaps_D1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_swmmac_i32_16x16x128_iu8_A0_overlaps_D1 + ; GFX1250: early-clobber $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 = V_SWMMAC_I32_16X16X128_IU8_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, killed $vgpr32_vgpr33, 0, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: $vgpr0 = V_ADD_F32_e32 $vgpr34, $vgpr35, implicit $mode, implicit $exec + $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 = V_SWMMAC_I32_16X16X128_IU8_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, killed $vgpr32_vgpr33, 0, 0, 0, 0, implicit $exec + $vgpr0 = V_ADD_F32_e32 $vgpr34, $vgpr35, implicit $mode, implicit $exec +... 
+ +--- +name: test_swmmac_i32_16x16x128_iu8_B0_overlaps_D1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_swmmac_i32_16x16x128_iu8_B0_overlaps_D1 + ; GFX1250: early-clobber $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 = V_SWMMAC_I32_16X16X128_IU8_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, killed $vgpr32_vgpr33, 0, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: $vgpr8 = V_ADD_F32_e32 $vgpr34, $vgpr35, implicit $mode, implicit $exec + $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 = V_SWMMAC_I32_16X16X128_IU8_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, killed $vgpr32_vgpr33, 0, 0, 0, 0, implicit $exec + $vgpr8 = V_ADD_F32_e32 $vgpr34, $vgpr35, implicit $mode, implicit $exec +... + +--- +name: test_swmmac_i32_16x16x128_iu8_Index0_overlaps_D1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_swmmac_i32_16x16x128_iu8_Index0_overlaps_D1 + ; GFX1250: early-clobber $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 = V_SWMMAC_I32_16X16X128_IU8_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, killed $vgpr32_vgpr33, 0, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: $vgpr32 = V_ADD_F32_e32 $vgpr34, $vgpr35, implicit $mode, implicit $exec + $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 = V_SWMMAC_I32_16X16X128_IU8_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, killed $vgpr32_vgpr33, 0, 0, 0, 0, implicit $exec + $vgpr32 = V_ADD_F32_e32 $vgpr34, $vgpr35, implicit $mode, implicit $exec +... diff --git a/llvm/test/CodeGen/AMDGPU/wmma-hazards-gfx1250-w32.mir b/llvm/test/CodeGen/AMDGPU/wmma-hazards-gfx1250-w32.mir new file mode 100644 index 0000000..2032b98 --- /dev/null +++ b/llvm/test/CodeGen/AMDGPU/wmma-hazards-gfx1250-w32.mir @@ -0,0 +1,1430 @@ +# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py +# RUN: llc -mtriple=amdgcn -mcpu=gfx1250 -run-pass post-RA-hazard-rec %s -o - | FileCheck -check-prefix=GFX1250 %s + +# For two consecutive wmma instructions, we need to insert V_NOP instructions between +# them if the matrix A, B or index operands of the second wmma are the same as, or overlap +# with, the previous wmma instruction’s D-matrix.
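The overlap rule stated above is easiest to see outside of MIR. The following is a minimal Python sketch of the check these tests exercise; it is not LLVM's actual GCNHazardRecognizer interface, and the helper names, the half-open-range model of VGPR operands, and the per-opcode wait counts (read off the autogenerated checks: one V_NOP for V_WMMA_F32_16X16X4_F32, up to eight for the IU8 and F8F6F4 variants) are all assumptions made for illustration. As the *_salus_in_between tests show, only VALU instructions between the two WMMAs count toward the wait; SALU instructions do not.

def ranges_overlap(a, b):
    # True if two half-open [lo, hi) VGPR index ranges share a register.
    return a[0] < b[1] and b[0] < a[1]

def nops_needed(prev_d, second_srcs, wait_states, valus_in_between):
    # prev_d: D-matrix VGPR range of the first WMMA/SWMMAC.
    # second_srcs: A, B and (for SWMMAC) index operand ranges of the second.
    # wait_states: opcode-dependent count (assumed; taken from the checks).
    # Intervening VALU instructions shorten the wait; SALU ones do not.
    if not any(ranges_overlap(prev_d, s) for s in second_srcs):
        return 0
    return max(0, wait_states - valus_in_between)

# D0 = v[4:12) feeding A1 = v[4:6), as in the first test below:
print(nops_needed((4, 12), [(4, 6), (16, 18)], 1, 0))  # -> 1 V_NOP
# An 8-state wait already covered by eight independent VALUs: no V_NOPs.
print(nops_needed((16, 24), [(16, 24)], 8, 8))         # -> 0

Under these assumptions the two calls reproduce what the checks encode: a hazard with no separating VALUs is filled entirely with V_NOPs, while enough independent VALU instructions between the pair elide them completely.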
+ +--- +name: test_wmma_f32_16x16x4_f32_D0_overlaps_A1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_wmma_f32_16x16x4_f32_D0_overlaps_A1 + ; GFX1250: early-clobber $vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11 = V_WMMA_F32_16X16X4_F32_w32_twoaddr 8, killed $vgpr0_vgpr1, 8, killed $vgpr2_vgpr3, 8, killed $vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11, 0, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: early-clobber $vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25 = V_WMMA_F32_16X16X4_F32_w32_twoaddr 8, killed $vgpr4_vgpr5, 8, killed $vgpr16_vgpr17, 8, killed $vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25, 0, 0, 0, 0, implicit $exec + $vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11 = V_WMMA_F32_16X16X4_F32_w32_twoaddr 8, killed $vgpr0_vgpr1, 8, killed $vgpr2_vgpr3, 8, killed $vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11, 0, 0, 0, 0, implicit $exec + $vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25 = V_WMMA_F32_16X16X4_F32_w32_twoaddr 8, killed $vgpr4_vgpr5, 8, killed $vgpr16_vgpr17, 8, killed $vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25, 0, 0, 0, 0, implicit $exec +... + +--- +name: test_wmma_f32_16x16x4_f32_D0_overlaps_B1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_wmma_f32_16x16x4_f32_D0_overlaps_B1 + ; GFX1250: early-clobber $vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11 = V_WMMA_F32_16X16X4_F32_w32_twoaddr 8, killed $vgpr0_vgpr1, 8, killed $vgpr2_vgpr3, 8, killed $vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11, 0, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: early-clobber $vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25 = V_WMMA_F32_16X16X4_F32_w32_twoaddr 8, killed $vgpr14_vgpr15, 8, killed $vgpr4_vgpr5, 8, killed $vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25, 0, 0, 0, 0, implicit $exec + $vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11 = V_WMMA_F32_16X16X4_F32_w32_twoaddr 8, killed $vgpr0_vgpr1, 8, killed $vgpr2_vgpr3, 8, killed $vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11, 0, 0, 0, 0, implicit $exec + $vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25 = V_WMMA_F32_16X16X4_F32_w32_twoaddr 8, killed $vgpr14_vgpr15, 8, killed $vgpr4_vgpr5, 8, killed $vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25, 0, 0, 0, 0, implicit $exec +... 
+ +--- +name: test_wmma_f32_16x16x4_f32_D0_overlaps_Index1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_wmma_f32_16x16x4_f32_D0_overlaps_Index1 + ; GFX1250: early-clobber $vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11 = V_WMMA_F32_16X16X4_F32_w32_twoaddr 8, killed $vgpr0_vgpr1, 8, killed $vgpr2_vgpr3, 8, killed $vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11, 0, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: early-clobber $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 = V_SWMMAC_F32_16X16X128_FP8_FP8_w32_twoaddr killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, killed $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55, killed $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63, killed $vgpr4_vgpr5, 0, 0, 0, implicit $exec + $vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11 = V_WMMA_F32_16X16X4_F32_w32_twoaddr 8, killed $vgpr0_vgpr1, 8, killed $vgpr2_vgpr3, 8, killed $vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11, 0, 0, 0, 0, implicit $exec + $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 = V_SWMMAC_F32_16X16X128_FP8_FP8_w32_twoaddr killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, killed $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55, killed $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63, killed $vgpr4_vgpr5, 0, 0, 0, implicit $exec +... + +--- +name: test_wmma_f32_16x16x32_bf16_D0_overlaps_A1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_wmma_f32_16x16x32_bf16_D0_overlaps_A1 + ; GFX1250: early-clobber $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 = V_WMMA_F32_16X16X32_BF16_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 8, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, 0, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: early-clobber $vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49 = V_WMMA_F32_16X16X32_BF16_w32_twoaddr 8, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, 8, killed $vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41, 8, killed $vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49, 0, 0, 0, 0, implicit $exec + $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 = V_WMMA_F32_16X16X32_BF16_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 8, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, 0, 0, 0, 0, implicit $exec + $vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49 = V_WMMA_F32_16X16X32_BF16_w32_twoaddr 8, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, 8, killed $vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41, 8, killed $vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49, 0, 0, 0, 0, implicit $exec +... 
+ +--- +name: test_wmma_f32_16x16x32_bf16_D0_overlaps_B1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_wmma_f32_16x16x32_bf16_D0_overlaps_B1 + ; GFX1250: early-clobber $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 = V_WMMA_F32_16X16X32_BF16_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 8, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, 0, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: early-clobber $vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49 = V_WMMA_F32_16X16X32_BF16_w32_twoaddr 8, killed $vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33, 8, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, 8, killed $vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49, 0, 0, 0, 0, implicit $exec + $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 = V_WMMA_F32_16X16X32_BF16_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 8, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, 0, 0, 0, 0, implicit $exec + $vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49 = V_WMMA_F32_16X16X32_BF16_w32_twoaddr 8, killed $vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33, 8, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, 8, killed $vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49, 0, 0, 0, 0, implicit $exec +... + +--- +name: test_wmma_f32_16x16x32_bf16_D0_overlaps_Index1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_wmma_f32_16x16x32_bf16_D0_overlaps_Index1 + ; GFX1250: early-clobber $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 = V_WMMA_F32_16X16X32_BF16_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 8, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, 0, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: early-clobber $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 = V_SWMMAC_F32_16X16X128_FP8_FP8_w32_twoaddr killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, killed $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55, killed $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63, killed $vgpr16_vgpr17, 0, 0, 0, implicit $exec + $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 = V_WMMA_F32_16X16X32_BF16_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 8, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, 0, 0, 0, 0, implicit $exec + $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 = V_SWMMAC_F32_16X16X128_FP8_FP8_w32_twoaddr killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, killed $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55, killed $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63, killed $vgpr16_vgpr17, 0, 
0, 0, implicit $exec +... + +--- +name: test_wmma_bf16_16x16x32_bf16_D0_overlaps_A1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_wmma_bf16_16x16x32_bf16_D0_overlaps_A1 + ; GFX1250: early-clobber $vgpr22_vgpr23_vgpr24_vgpr25 = V_WMMA_BF16_16X16X32_BF16_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 8, killed $vgpr22_vgpr23_vgpr24_vgpr25, 0, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: early-clobber $vgpr38_vgpr39_vgpr40_vgpr41 = V_WMMA_BF16_16X16X32_BF16_w32_twoaddr 8, killed $vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29, 8, killed $vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37, 8, killed $vgpr38_vgpr39_vgpr40_vgpr41, 0, 0, 0, 0, implicit $exec + $vgpr22_vgpr23_vgpr24_vgpr25 = V_WMMA_BF16_16X16X32_BF16_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 8, killed $vgpr22_vgpr23_vgpr24_vgpr25, 0, 0, 0, 0, implicit $exec + $vgpr38_vgpr39_vgpr40_vgpr41 = V_WMMA_BF16_16X16X32_BF16_w32_twoaddr 8, killed $vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29, 8, killed $vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37, 8, killed $vgpr38_vgpr39_vgpr40_vgpr41, 0, 0, 0, 0, implicit $exec +... + +--- +name: test_wmma_bf16_16x16x32_bf16_D0_overlaps_B1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_wmma_bf16_16x16x32_bf16_D0_overlaps_B1 + ; GFX1250: early-clobber $vgpr30_vgpr31_vgpr32_vgpr33 = V_WMMA_BF16_16X16X32_BF16_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 8, killed $vgpr30_vgpr31_vgpr32_vgpr33, 0, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: early-clobber $vgpr38_vgpr39_vgpr40_vgpr41 = V_WMMA_BF16_16X16X32_BF16_w32_twoaddr 8, killed $vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29, 8, killed $vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37, 8, killed $vgpr38_vgpr39_vgpr40_vgpr41, 0, 0, 0, 0, implicit $exec + $vgpr30_vgpr31_vgpr32_vgpr33 = V_WMMA_BF16_16X16X32_BF16_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 8, killed $vgpr30_vgpr31_vgpr32_vgpr33, 0, 0, 0, 0, implicit $exec + $vgpr38_vgpr39_vgpr40_vgpr41 = V_WMMA_BF16_16X16X32_BF16_w32_twoaddr 8, killed $vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29, 8, killed $vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37, 8, killed $vgpr38_vgpr39_vgpr40_vgpr41, 0, 0, 0, 0, implicit $exec +... 
+ +--- +name: test_wmma_bf16_16x16x32_bf16_D0_overlaps_Index1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_wmma_bf16_16x16x32_bf16_D0_overlaps_Index1 + ; GFX1250: early-clobber $vgpr22_vgpr23_vgpr24_vgpr25 = V_WMMA_BF16_16X16X32_BF16_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 8, killed $vgpr22_vgpr23_vgpr24_vgpr25, 0, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: early-clobber $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 = V_SWMMAC_F32_16X16X128_FP8_FP8_w32_twoaddr killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, killed $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55, killed $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63, killed $vgpr22_vgpr23, 0, 0, 0, implicit $exec + $vgpr22_vgpr23_vgpr24_vgpr25 = V_WMMA_BF16_16X16X32_BF16_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 8, killed $vgpr22_vgpr23_vgpr24_vgpr25, 0, 0, 0, 0, implicit $exec + $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 = V_SWMMAC_F32_16X16X128_FP8_FP8_w32_twoaddr killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, killed $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55, killed $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63, killed $vgpr22_vgpr23, 0, 0, 0, implicit $exec +... + +--- +name: test_wmma_bf16f32_16x16x32_bf16_D0_overlaps_A1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_wmma_bf16f32_16x16x32_bf16_D0_overlaps_A1 + ; GFX1250: early-clobber $vgpr26_vgpr27_vgpr28_vgpr29 = V_WMMA_BF16F32_16X16X32_BF16_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 8, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, 0, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: early-clobber $vgpr0_vgpr1_vgpr2_vgpr3 = V_WMMA_BF16F32_16X16X32_BF16_w32_twoaddr 8, killed $vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33, 8, killed $vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41, 9, killed $vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49, 0, 0, 0, 0, implicit $exec + $vgpr26_vgpr27_vgpr28_vgpr29 = V_WMMA_BF16F32_16X16X32_BF16_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 8, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, 0, 0, 0, 0, implicit $exec + $vgpr0_vgpr1_vgpr2_vgpr3 = V_WMMA_BF16F32_16X16X32_BF16_w32_twoaddr 8, killed $vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33, 8, killed $vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41, 9, killed $vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49, 0, 0, 0, 0, implicit $exec +... 
+ +--- +name: test_wmma_bf16f32_16x16x32_bf16_D0_overlaps_B1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_wmma_bf16f32_16x16x32_bf16_D0_overlaps_B1 + ; GFX1250: early-clobber $vgpr34_vgpr35_vgpr36_vgpr37 = V_WMMA_BF16F32_16X16X32_BF16_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 8, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, 0, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: early-clobber $vgpr0_vgpr1_vgpr2_vgpr3 = V_WMMA_BF16F32_16X16X32_BF16_w32_twoaddr 8, killed $vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33, 8, killed $vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41, 9, killed $vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49, 0, 0, 0, 0, implicit $exec + $vgpr34_vgpr35_vgpr36_vgpr37 = V_WMMA_BF16F32_16X16X32_BF16_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 8, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, 0, 0, 0, 0, implicit $exec + $vgpr0_vgpr1_vgpr2_vgpr3 = V_WMMA_BF16F32_16X16X32_BF16_w32_twoaddr 8, killed $vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33, 8, killed $vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41, 9, killed $vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49, 0, 0, 0, 0, implicit $exec +... + +--- +name: test_wmma_bf16f32_16x16x32_bf16_D0_overlaps_Index1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_wmma_bf16f32_16x16x32_bf16_D0_overlaps_Index1 + ; GFX1250: early-clobber $vgpr26_vgpr27_vgpr28_vgpr29 = V_WMMA_BF16F32_16X16X32_BF16_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 8, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, 0, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: early-clobber $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 = V_SWMMAC_F32_16X16X128_FP8_FP8_w32_twoaddr killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, killed $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55, killed $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63, killed $vgpr26_vgpr27, 0, 0, 0, implicit $exec + $vgpr26_vgpr27_vgpr28_vgpr29 = V_WMMA_BF16F32_16X16X32_BF16_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 8, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, 0, 0, 0, 0, implicit $exec + $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 = V_SWMMAC_F32_16X16X128_FP8_FP8_w32_twoaddr killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, killed $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55, killed $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63, killed $vgpr26_vgpr27, 0, 0, 0, implicit $exec +... 
+ +--- +name: test_wmma_f32_16x16x64_fp8_fp8_D0_overlaps_A1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_wmma_f32_16x16x64_fp8_fp8_D0_overlaps_A1 + ; GFX1250: early-clobber $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 = V_WMMA_F32_16X16X64_FP8_FP8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 8, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, 0, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: early-clobber $vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49 = V_WMMA_F32_16X16X64_FP8_FP8_w32_twoaddr killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41, 8, killed $vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49, 0, 0, 0, 0, implicit $exec + $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 = V_WMMA_F32_16X16X64_FP8_FP8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 8, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, 0, 0, 0, 0, implicit $exec + $vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49 = V_WMMA_F32_16X16X64_FP8_FP8_w32_twoaddr killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41, 8, killed $vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49, 0, 0, 0, 0, implicit $exec +... + +--- +name: test_wmma_f32_16x16x64_fp8_fp8_D0_overlaps_B1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_wmma_f32_16x16x64_fp8_fp8_D0_overlaps_B1 + ; GFX1250: early-clobber $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 = V_WMMA_F32_16X16X64_FP8_FP8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 8, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, 0, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: early-clobber $vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49 = V_WMMA_F32_16X16X64_FP8_FP8_w32_twoaddr killed $vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, 8, killed $vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49, 0, 0, 0, 0, implicit $exec + $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 = V_WMMA_F32_16X16X64_FP8_FP8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 8, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, 0, 0, 0, 0, implicit $exec + $vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49 = V_WMMA_F32_16X16X64_FP8_FP8_w32_twoaddr killed $vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, 8, killed $vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49, 0, 0, 0, 0, implicit $exec +... 
+ +--- +name: test_wmma_f32_16x16x64_fp8_fp8_D0_overlaps_Index1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_wmma_f32_16x16x64_fp8_fp8_D0_overlaps_Index1 + ; GFX1250: early-clobber $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 = V_WMMA_F32_16X16X64_FP8_FP8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 8, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, 0, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: early-clobber $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 = V_SWMMAC_F32_16X16X128_FP8_FP8_w32_twoaddr killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, killed $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55, killed $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63, killed $vgpr16_vgpr17, 0, 0, 0, implicit $exec + $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 = V_WMMA_F32_16X16X64_FP8_FP8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 8, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, 0, 0, 0, 0, implicit $exec + $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 = V_SWMMAC_F32_16X16X128_FP8_FP8_w32_twoaddr killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, killed $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55, killed $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63, killed $vgpr16_vgpr17, 0, 0, 0, implicit $exec +... 
+ +--- +name: test_wmma_f32_16x16x64_fp8_bf8_D0_overlaps_A1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_wmma_f32_16x16x64_fp8_bf8_D0_overlaps_A1 + ; GFX1250: early-clobber $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 = V_WMMA_F32_16X16X64_FP8_BF8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 8, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, 0, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: early-clobber $vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49 = V_WMMA_F32_16X16X64_FP8_BF8_w32_twoaddr killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41, 8, killed $vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49, 0, 0, 0, 0, implicit $exec + $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 = V_WMMA_F32_16X16X64_FP8_BF8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 8, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, 0, 0, 0, 0, implicit $exec + $vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49 = V_WMMA_F32_16X16X64_FP8_BF8_w32_twoaddr killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41, 8, killed $vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49, 0, 0, 0, 0, implicit $exec +... + +--- +name: test_wmma_f32_16x16x64_fp8_bf8_D0_overlaps_B1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_wmma_f32_16x16x64_fp8_bf8_D0_overlaps_B1 + ; GFX1250: early-clobber $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 = V_WMMA_F32_16X16X64_FP8_BF8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 8, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, 0, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: early-clobber $vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49 = V_WMMA_F32_16X16X64_FP8_BF8_w32_twoaddr killed $vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, 8, killed $vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49, 0, 0, 0, 0, implicit $exec + $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 = V_WMMA_F32_16X16X64_FP8_BF8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 8, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, 0, 0, 0, 0, implicit $exec + $vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49 = V_WMMA_F32_16X16X64_FP8_BF8_w32_twoaddr killed $vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, 8, killed $vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49, 0, 0, 0, 0, implicit $exec +... 
+ +--- +name: test_wmma_f32_16x16x64_fp8_bf8_D0_overlaps_Index1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_wmma_f32_16x16x64_fp8_bf8_D0_overlaps_Index1 + ; GFX1250: early-clobber $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 = V_WMMA_F32_16X16X64_FP8_BF8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 8, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, 0, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: early-clobber $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 = V_SWMMAC_F32_16X16X128_FP8_FP8_w32_twoaddr killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, killed $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55, killed $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63, killed $vgpr16_vgpr17, 0, 0, 0, implicit $exec + $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 = V_WMMA_F32_16X16X64_FP8_BF8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 8, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, 0, 0, 0, 0, implicit $exec + $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 = V_SWMMAC_F32_16X16X128_FP8_FP8_w32_twoaddr killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, killed $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55, killed $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63, killed $vgpr16_vgpr17, 0, 0, 0, implicit $exec +... 
+ +--- +name: test_wmma_f32_16x16x64_bf8_fp8_D0_overlaps_A1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_wmma_f32_16x16x64_bf8_fp8_D0_overlaps_A1 + ; GFX1250: early-clobber $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 = V_WMMA_F32_16X16X64_BF8_FP8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 8, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, 0, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: early-clobber $vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49 = V_WMMA_F32_16X16X64_BF8_FP8_w32_twoaddr killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41, 8, killed $vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49, 0, 0, 0, 0, implicit $exec + $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 = V_WMMA_F32_16X16X64_BF8_FP8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 8, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, 0, 0, 0, 0, implicit $exec + $vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49 = V_WMMA_F32_16X16X64_BF8_FP8_w32_twoaddr killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41, 8, killed $vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49, 0, 0, 0, 0, implicit $exec +... + +--- +name: test_wmma_f32_16x16x64_bf8_fp8_D0_overlaps_B1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_wmma_f32_16x16x64_bf8_fp8_D0_overlaps_B1 + ; GFX1250: early-clobber $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 = V_WMMA_F32_16X16X64_BF8_FP8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 8, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, 0, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: early-clobber $vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49 = V_WMMA_F32_16X16X64_BF8_FP8_w32_twoaddr killed $vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, 8, killed $vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49, 0, 0, 0, 0, implicit $exec + $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 = V_WMMA_F32_16X16X64_BF8_FP8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 8, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, 0, 0, 0, 0, implicit $exec + $vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49 = V_WMMA_F32_16X16X64_BF8_FP8_w32_twoaddr killed $vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, 8, killed $vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49, 0, 0, 0, 0, implicit $exec +... 
+ +--- +name: test_wmma_f32_16x16x64_bf8_fp8_D0_overlaps_Index1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_wmma_f32_16x16x64_bf8_fp8_D0_overlaps_Index1 + ; GFX1250: early-clobber $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 = V_WMMA_F32_16X16X64_BF8_FP8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 8, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, 0, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: early-clobber $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 = V_SWMMAC_F32_16X16X128_FP8_FP8_w32_twoaddr killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, killed $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55, killed $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63, killed $vgpr16_vgpr17, 0, 0, 0, implicit $exec + $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 = V_WMMA_F32_16X16X64_BF8_FP8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 8, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, 0, 0, 0, 0, implicit $exec + $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 = V_SWMMAC_F32_16X16X128_FP8_FP8_w32_twoaddr killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, killed $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55, killed $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63, killed $vgpr16_vgpr17, 0, 0, 0, implicit $exec +... 
+ +--- +name: test_wmma_f32_16x16x64_bf8_bf8_D0_overlaps_A1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_wmma_f32_16x16x64_bf8_bf8_D0_overlaps_A1 + ; GFX1250: early-clobber $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 = V_WMMA_F32_16X16X64_BF8_BF8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 8, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, 0, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: early-clobber $vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49 = V_WMMA_F32_16X16X64_BF8_BF8_w32_twoaddr killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41, 8, killed $vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49, 0, 0, 0, 0, implicit $exec + $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 = V_WMMA_F32_16X16X64_BF8_BF8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 8, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, 0, 0, 0, 0, implicit $exec + $vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49 = V_WMMA_F32_16X16X64_BF8_BF8_w32_twoaddr killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41, 8, killed $vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49, 0, 0, 0, 0, implicit $exec +... + +--- +name: test_wmma_f32_16x16x64_bf8_bf8_D0_overlaps_B1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_wmma_f32_16x16x64_bf8_bf8_D0_overlaps_B1 + ; GFX1250: early-clobber $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 = V_WMMA_F32_16X16X64_BF8_BF8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 8, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, 0, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: early-clobber $vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49 = V_WMMA_F32_16X16X64_BF8_BF8_w32_twoaddr killed $vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, 8, killed $vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49, 0, 0, 0, 0, implicit $exec + $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 = V_WMMA_F32_16X16X64_BF8_BF8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 8, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, 0, 0, 0, 0, implicit $exec + $vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49 = V_WMMA_F32_16X16X64_BF8_BF8_w32_twoaddr killed $vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, 8, killed $vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49, 0, 0, 0, 0, implicit $exec +... 
+ +--- +name: test_wmma_f32_16x16x64_bf8_bf8_D0_overlaps_Index1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_wmma_f32_16x16x64_bf8_bf8_D0_overlaps_Index1 + ; GFX1250: early-clobber $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 = V_WMMA_F32_16X16X64_BF8_BF8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 8, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, 0, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: early-clobber $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 = V_SWMMAC_F32_16X16X128_FP8_FP8_w32_twoaddr killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, killed $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55, killed $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63, killed $vgpr16_vgpr17, 0, 0, 0, implicit $exec + $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 = V_WMMA_F32_16X16X64_BF8_BF8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 8, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, 0, 0, 0, 0, implicit $exec + $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 = V_SWMMAC_F32_16X16X128_FP8_FP8_w32_twoaddr killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, killed $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55, killed $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63, killed $vgpr16_vgpr17, 0, 0, 0, implicit $exec +... + +--- +name: test_wmma_f16_16x16x64_fp8_fp8_D0_overlaps_A1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_wmma_f16_16x16x64_fp8_fp8_D0_overlaps_A1 + ; GFX1250: early-clobber $vgpr22_vgpr23_vgpr24_vgpr25 = V_WMMA_F16_16X16X64_FP8_FP8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 8, killed $vgpr22_vgpr23_vgpr24_vgpr25, 0, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: early-clobber $vgpr38_vgpr39_vgpr40_vgpr41 = V_WMMA_F16_16X16X64_FP8_FP8_w32_twoaddr killed $vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29, killed $vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37, 8, killed $vgpr38_vgpr39_vgpr40_vgpr41, 0, 0, 0, 0, implicit $exec + $vgpr22_vgpr23_vgpr24_vgpr25 = V_WMMA_F16_16X16X64_FP8_FP8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 8, killed $vgpr22_vgpr23_vgpr24_vgpr25, 0, 0, 0, 0, implicit $exec + $vgpr38_vgpr39_vgpr40_vgpr41 = V_WMMA_F16_16X16X64_FP8_FP8_w32_twoaddr killed $vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29, killed $vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37, 8, killed $vgpr38_vgpr39_vgpr40_vgpr41, 0, 0, 0, 0, implicit $exec +... 
+ +--- +name: test_wmma_f16_16x16x64_fp8_fp8_D0_overlaps_B1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_wmma_f16_16x16x64_fp8_fp8_D0_overlaps_B1 + ; GFX1250: early-clobber $vgpr30_vgpr31_vgpr32_vgpr33 = V_WMMA_F16_16X16X64_FP8_FP8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 8, killed $vgpr30_vgpr31_vgpr32_vgpr33, 0, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: early-clobber $vgpr38_vgpr39_vgpr40_vgpr41 = V_WMMA_F16_16X16X64_FP8_FP8_w32_twoaddr killed $vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29, killed $vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37, 8, killed $vgpr38_vgpr39_vgpr40_vgpr41, 0, 0, 0, 0, implicit $exec + $vgpr30_vgpr31_vgpr32_vgpr33 = V_WMMA_F16_16X16X64_FP8_FP8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 8, killed $vgpr30_vgpr31_vgpr32_vgpr33, 0, 0, 0, 0, implicit $exec + $vgpr38_vgpr39_vgpr40_vgpr41 = V_WMMA_F16_16X16X64_FP8_FP8_w32_twoaddr killed $vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29, killed $vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37, 8, killed $vgpr38_vgpr39_vgpr40_vgpr41, 0, 0, 0, 0, implicit $exec +... + +--- +name: test_wmma_f16_16x16x64_fp8_fp8_D0_overlaps_Index1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_wmma_f16_16x16x64_fp8_fp8_D0_overlaps_Index1 + ; GFX1250: early-clobber $vgpr22_vgpr23_vgpr24_vgpr25 = V_WMMA_F16_16X16X64_FP8_FP8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 8, killed $vgpr22_vgpr23_vgpr24_vgpr25, 0, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: early-clobber $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 = V_SWMMAC_F32_16X16X128_FP8_FP8_w32_twoaddr killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, killed $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55, killed $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63, killed $vgpr22_vgpr23, 0, 0, 0, implicit $exec + $vgpr22_vgpr23_vgpr24_vgpr25 = V_WMMA_F16_16X16X64_FP8_FP8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 8, killed $vgpr22_vgpr23_vgpr24_vgpr25, 0, 0, 0, 0, implicit $exec + $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 = V_SWMMAC_F32_16X16X128_FP8_FP8_w32_twoaddr killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, killed $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55, killed $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63, killed $vgpr22_vgpr23, 0, 0, 0, implicit $exec +... 
+ +--- +name: test_wmma_f16_16x16x64_fp8_bf8_D0_overlaps_A1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_wmma_f16_16x16x64_fp8_bf8_D0_overlaps_A1 + ; GFX1250: early-clobber $vgpr22_vgpr23_vgpr24_vgpr25 = V_WMMA_F16_16X16X64_FP8_BF8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 8, killed $vgpr22_vgpr23_vgpr24_vgpr25, 0, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: early-clobber $vgpr38_vgpr39_vgpr40_vgpr41 = V_WMMA_F16_16X16X64_FP8_BF8_w32_twoaddr killed $vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29, killed $vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37, 8, killed $vgpr38_vgpr39_vgpr40_vgpr41, 0, 0, 0, 0, implicit $exec + $vgpr22_vgpr23_vgpr24_vgpr25 = V_WMMA_F16_16X16X64_FP8_BF8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 8, killed $vgpr22_vgpr23_vgpr24_vgpr25, 0, 0, 0, 0, implicit $exec + $vgpr38_vgpr39_vgpr40_vgpr41 = V_WMMA_F16_16X16X64_FP8_BF8_w32_twoaddr killed $vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29, killed $vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37, 8, killed $vgpr38_vgpr39_vgpr40_vgpr41, 0, 0, 0, 0, implicit $exec +... + +--- +name: test_wmma_f16_16x16x64_fp8_bf8_D0_overlaps_B1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_wmma_f16_16x16x64_fp8_bf8_D0_overlaps_B1 + ; GFX1250: early-clobber $vgpr30_vgpr31_vgpr32_vgpr33 = V_WMMA_F16_16X16X64_FP8_BF8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 8, killed $vgpr30_vgpr31_vgpr32_vgpr33, 0, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: early-clobber $vgpr38_vgpr39_vgpr40_vgpr41 = V_WMMA_F16_16X16X64_FP8_BF8_w32_twoaddr killed $vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29, killed $vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37, 8, killed $vgpr38_vgpr39_vgpr40_vgpr41, 0, 0, 0, 0, implicit $exec + $vgpr30_vgpr31_vgpr32_vgpr33 = V_WMMA_F16_16X16X64_FP8_BF8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 8, killed $vgpr30_vgpr31_vgpr32_vgpr33, 0, 0, 0, 0, implicit $exec + $vgpr38_vgpr39_vgpr40_vgpr41 = V_WMMA_F16_16X16X64_FP8_BF8_w32_twoaddr killed $vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29, killed $vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37, 8, killed $vgpr38_vgpr39_vgpr40_vgpr41, 0, 0, 0, 0, implicit $exec +... 
+
+---
+name: test_wmma_f16_16x16x64_fp8_bf8_D0_overlaps_Index1
+body: |
+  bb.0:
+    ; GFX1250-LABEL: name: test_wmma_f16_16x16x64_fp8_bf8_D0_overlaps_Index1
+    ; GFX1250: early-clobber $vgpr22_vgpr23_vgpr24_vgpr25 = V_WMMA_F16_16X16X64_FP8_BF8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 8, killed $vgpr22_vgpr23_vgpr24_vgpr25, 0, 0, 0, 0, implicit $exec
+    ; GFX1250-NEXT: V_NOP_e32 implicit $exec
+    ; GFX1250-NEXT: V_NOP_e32 implicit $exec
+    ; GFX1250-NEXT: V_NOP_e32 implicit $exec
+    ; GFX1250-NEXT: V_NOP_e32 implicit $exec
+    ; GFX1250-NEXT: V_NOP_e32 implicit $exec
+    ; GFX1250-NEXT: early-clobber $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 = V_SWMMAC_F32_16X16X128_FP8_FP8_w32_twoaddr killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, killed $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55, killed $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63, killed $vgpr22_vgpr23, 0, 0, 0, implicit $exec
+    $vgpr22_vgpr23_vgpr24_vgpr25 = V_WMMA_F16_16X16X64_FP8_BF8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 8, killed $vgpr22_vgpr23_vgpr24_vgpr25, 0, 0, 0, 0, implicit $exec
+    $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 = V_SWMMAC_F32_16X16X128_FP8_FP8_w32_twoaddr killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, killed $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55, killed $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63, killed $vgpr22_vgpr23, 0, 0, 0, implicit $exec
+...
+
+---
+name: test_wmma_f16_16x16x64_bf8_fp8_D0_overlaps_A1
+body: |
+  bb.0:
+    ; GFX1250-LABEL: name: test_wmma_f16_16x16x64_bf8_fp8_D0_overlaps_A1
+    ; GFX1250: early-clobber $vgpr22_vgpr23_vgpr24_vgpr25 = V_WMMA_F16_16X16X64_BF8_FP8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 8, killed $vgpr22_vgpr23_vgpr24_vgpr25, 0, 0, 0, 0, implicit $exec
+    ; GFX1250-NEXT: V_NOP_e32 implicit $exec
+    ; GFX1250-NEXT: V_NOP_e32 implicit $exec
+    ; GFX1250-NEXT: V_NOP_e32 implicit $exec
+    ; GFX1250-NEXT: V_NOP_e32 implicit $exec
+    ; GFX1250-NEXT: V_NOP_e32 implicit $exec
+    ; GFX1250-NEXT: early-clobber $vgpr38_vgpr39_vgpr40_vgpr41 = V_WMMA_F16_16X16X64_BF8_FP8_w32_twoaddr killed $vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29, killed $vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37, 8, killed $vgpr38_vgpr39_vgpr40_vgpr41, 0, 0, 0, 0, implicit $exec
+    $vgpr22_vgpr23_vgpr24_vgpr25 = V_WMMA_F16_16X16X64_BF8_FP8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 8, killed $vgpr22_vgpr23_vgpr24_vgpr25, 0, 0, 0, 0, implicit $exec
+    $vgpr38_vgpr39_vgpr40_vgpr41 = V_WMMA_F16_16X16X64_BF8_FP8_w32_twoaddr killed $vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29, killed $vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37, 8, killed $vgpr38_vgpr39_vgpr40_vgpr41, 0, 0, 0, 0, implicit $exec
+...
+
+---
+name: test_wmma_f16_16x16x64_bf8_fp8_D0_overlaps_B1
+body: |
+  bb.0:
+    ; GFX1250-LABEL: name: test_wmma_f16_16x16x64_bf8_fp8_D0_overlaps_B1
+    ; GFX1250: early-clobber $vgpr30_vgpr31_vgpr32_vgpr33 = V_WMMA_F16_16X16X64_BF8_FP8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 8, killed $vgpr30_vgpr31_vgpr32_vgpr33, 0, 0, 0, 0, implicit $exec
+    ; GFX1250-NEXT: V_NOP_e32 implicit $exec
+    ; GFX1250-NEXT: V_NOP_e32 implicit $exec
+    ; GFX1250-NEXT: V_NOP_e32 implicit $exec
+    ; GFX1250-NEXT: V_NOP_e32 implicit $exec
+    ; GFX1250-NEXT: V_NOP_e32 implicit $exec
+    ; GFX1250-NEXT: early-clobber $vgpr38_vgpr39_vgpr40_vgpr41 = V_WMMA_F16_16X16X64_BF8_FP8_w32_twoaddr killed $vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29, killed $vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37, 8, killed $vgpr38_vgpr39_vgpr40_vgpr41, 0, 0, 0, 0, implicit $exec
+    $vgpr30_vgpr31_vgpr32_vgpr33 = V_WMMA_F16_16X16X64_BF8_FP8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 8, killed $vgpr30_vgpr31_vgpr32_vgpr33, 0, 0, 0, 0, implicit $exec
+    $vgpr38_vgpr39_vgpr40_vgpr41 = V_WMMA_F16_16X16X64_BF8_FP8_w32_twoaddr killed $vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29, killed $vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37, 8, killed $vgpr38_vgpr39_vgpr40_vgpr41, 0, 0, 0, 0, implicit $exec
+...
+
+---
+name: test_wmma_f16_16x16x64_bf8_fp8_D0_overlaps_Index1
+body: |
+  bb.0:
+    ; GFX1250-LABEL: name: test_wmma_f16_16x16x64_bf8_fp8_D0_overlaps_Index1
+    ; GFX1250: early-clobber $vgpr22_vgpr23_vgpr24_vgpr25 = V_WMMA_F16_16X16X64_BF8_FP8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 8, killed $vgpr22_vgpr23_vgpr24_vgpr25, 0, 0, 0, 0, implicit $exec
+    ; GFX1250-NEXT: V_NOP_e32 implicit $exec
+    ; GFX1250-NEXT: V_NOP_e32 implicit $exec
+    ; GFX1250-NEXT: V_NOP_e32 implicit $exec
+    ; GFX1250-NEXT: V_NOP_e32 implicit $exec
+    ; GFX1250-NEXT: V_NOP_e32 implicit $exec
+    ; GFX1250-NEXT: early-clobber $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 = V_SWMMAC_F32_16X16X128_FP8_FP8_w32_twoaddr killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, killed $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55, killed $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63, killed $vgpr22_vgpr23, 0, 0, 0, implicit $exec
+    $vgpr22_vgpr23_vgpr24_vgpr25 = V_WMMA_F16_16X16X64_BF8_FP8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 8, killed $vgpr22_vgpr23_vgpr24_vgpr25, 0, 0, 0, 0, implicit $exec
+    $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 = V_SWMMAC_F32_16X16X128_FP8_FP8_w32_twoaddr killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, killed $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55, killed $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63, killed $vgpr22_vgpr23, 0, 0, 0, implicit $exec
+...
+ +--- +name: test_wmma_f16_16x16x64_bf8_bf8_D0_overlaps_A1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_wmma_f16_16x16x64_bf8_bf8_D0_overlaps_A1 + ; GFX1250: early-clobber $vgpr22_vgpr23_vgpr24_vgpr25 = V_WMMA_F16_16X16X64_BF8_BF8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 8, killed $vgpr22_vgpr23_vgpr24_vgpr25, 0, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: early-clobber $vgpr38_vgpr39_vgpr40_vgpr41 = V_WMMA_F16_16X16X64_BF8_BF8_w32_twoaddr killed $vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29, killed $vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37, 8, killed $vgpr38_vgpr39_vgpr40_vgpr41, 0, 0, 0, 0, implicit $exec + $vgpr22_vgpr23_vgpr24_vgpr25 = V_WMMA_F16_16X16X64_BF8_BF8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 8, killed $vgpr22_vgpr23_vgpr24_vgpr25, 0, 0, 0, 0, implicit $exec + $vgpr38_vgpr39_vgpr40_vgpr41 = V_WMMA_F16_16X16X64_BF8_BF8_w32_twoaddr killed $vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29, killed $vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37, 8, killed $vgpr38_vgpr39_vgpr40_vgpr41, 0, 0, 0, 0, implicit $exec +... + +--- +name: test_wmma_f16_16x16x64_bf8_bf8_D0_overlaps_B1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_wmma_f16_16x16x64_bf8_bf8_D0_overlaps_B1 + ; GFX1250: early-clobber $vgpr30_vgpr31_vgpr32_vgpr33 = V_WMMA_F16_16X16X64_BF8_BF8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 8, killed $vgpr30_vgpr31_vgpr32_vgpr33, 0, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: early-clobber $vgpr38_vgpr39_vgpr40_vgpr41 = V_WMMA_F16_16X16X64_BF8_BF8_w32_twoaddr killed $vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29, killed $vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37, 8, killed $vgpr38_vgpr39_vgpr40_vgpr41, 0, 0, 0, 0, implicit $exec + $vgpr30_vgpr31_vgpr32_vgpr33 = V_WMMA_F16_16X16X64_BF8_BF8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 8, killed $vgpr30_vgpr31_vgpr32_vgpr33, 0, 0, 0, 0, implicit $exec + $vgpr38_vgpr39_vgpr40_vgpr41 = V_WMMA_F16_16X16X64_BF8_BF8_w32_twoaddr killed $vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29, killed $vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37, 8, killed $vgpr38_vgpr39_vgpr40_vgpr41, 0, 0, 0, 0, implicit $exec +... 
+ +--- +name: test_wmma_f16_16x16x64_bf8_bf8_D0_overlaps_Index1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_wmma_f16_16x16x64_bf8_bf8_D0_overlaps_Index1 + ; GFX1250: early-clobber $vgpr22_vgpr23_vgpr24_vgpr25 = V_WMMA_F16_16X16X64_BF8_BF8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 8, killed $vgpr22_vgpr23_vgpr24_vgpr25, 0, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: early-clobber $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 = V_SWMMAC_F32_16X16X128_FP8_FP8_w32_twoaddr killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, killed $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55, killed $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63, killed $vgpr22_vgpr23, 0, 0, 0, implicit $exec + $vgpr22_vgpr23_vgpr24_vgpr25 = V_WMMA_F16_16X16X64_BF8_BF8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 8, killed $vgpr22_vgpr23_vgpr24_vgpr25, 0, 0, 0, 0, implicit $exec + $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 = V_SWMMAC_F32_16X16X128_FP8_FP8_w32_twoaddr killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, killed $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55, killed $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63, killed $vgpr22_vgpr23, 0, 0, 0, implicit $exec +... + +--- +name: test_wmma_I32_16x16x64_IU8_D0_overlaps_A1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_wmma_I32_16x16x64_IU8_D0_overlaps_A1 + ; GFX1250: early-clobber $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 = V_WMMA_I32_16X16X64_IU8_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: early-clobber $vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49 = V_WMMA_I32_16X16X64_IU8_w32_twoaddr 8, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, 8, killed $vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41, killed $vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49, 0, 0, 0, implicit $exec + $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 = V_WMMA_I32_16X16X64_IU8_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, 0, 0, 0, implicit $exec + $vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49 = V_WMMA_I32_16X16X64_IU8_w32_twoaddr 8, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, 8, killed $vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41, killed 
$vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49, 0, 0, 0, implicit $exec +... + +--- +name: test_wmma_I32_16x16x64_IU8_D0_overlaps_B1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_wmma_I32_16x16x64_IU8_D0_overlaps_B1 + ; GFX1250: early-clobber $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 = V_WMMA_I32_16X16X64_IU8_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: early-clobber $vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49 = V_WMMA_I32_16X16X64_IU8_w32_twoaddr 8, killed $vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33, 8, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49, 0, 0, 0, implicit $exec + $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 = V_WMMA_I32_16X16X64_IU8_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, 0, 0, 0, implicit $exec + $vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49 = V_WMMA_I32_16X16X64_IU8_w32_twoaddr 8, killed $vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33, 8, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49, 0, 0, 0, implicit $exec +... 
+ +--- +name: test_wmma_I32_16x16x64_IU8_D0_overlaps_Index1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_wmma_I32_16x16x64_IU8_D0_overlaps_Index1 + ; GFX1250: early-clobber $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 = V_WMMA_I32_16X16X64_IU8_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: early-clobber $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 = V_SWMMAC_F32_16X16X128_FP8_FP8_w32_twoaddr killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, killed $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55, killed $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63, killed $vgpr16_vgpr17, 0, 0, 0, implicit $exec + $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 = V_WMMA_I32_16X16X64_IU8_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, 0, 0, 0, implicit $exec + $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 = V_SWMMAC_F32_16X16X128_FP8_FP8_w32_twoaddr killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, killed $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55, killed $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63, killed $vgpr16_vgpr17, 0, 0, 0, implicit $exec +... 
+ +--- +name: test_wmma_f32_16x16x32_f16_D0_overlaps_A1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_wmma_f32_16x16x32_f16_D0_overlaps_A1 + ; GFX1250: early-clobber $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 = V_WMMA_F32_16X16X32_F16_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 8, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, 0, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: early-clobber $vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49 = V_WMMA_F32_16X16X32_F16_w32_twoaddr 8, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, 8, killed $vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41, 8, killed $vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49, 0, 0, 0, 0, implicit $exec + $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 = V_WMMA_F32_16X16X32_F16_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 8, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, 0, 0, 0, 0, implicit $exec + $vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49 = V_WMMA_F32_16X16X32_F16_w32_twoaddr 8, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, 8, killed $vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41, 8, killed $vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49, 0, 0, 0, 0, implicit $exec +... + +--- +name: test_wmma_f32_16x16x32_f16_D0_overlaps_B1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_wmma_f32_16x16x32_f16_D0_overlaps_B1 + ; GFX1250: early-clobber $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 = V_WMMA_F32_16X16X32_F16_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 8, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, 0, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: early-clobber $vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49 = V_WMMA_F32_16X16X32_F16_w32_twoaddr 8, killed $vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33, 8, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, 8, killed $vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49, 0, 0, 0, 0, implicit $exec + $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 = V_WMMA_F32_16X16X32_F16_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 8, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, 0, 0, 0, 0, implicit $exec + $vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49 = V_WMMA_F32_16X16X32_F16_w32_twoaddr 8, killed $vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33, 8, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, 8, killed $vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49, 0, 0, 0, 0, implicit $exec +... 
+ +--- +name: test_wmma_f32_16x16x32_f16_D0_overlaps_Index1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_wmma_f32_16x16x32_f16_D0_overlaps_Index1 + ; GFX1250: early-clobber $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 = V_WMMA_F32_16X16X32_F16_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 8, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, 0, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: early-clobber $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 = V_SWMMAC_F32_16X16X128_FP8_FP8_w32_twoaddr killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, killed $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55, killed $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63, killed $vgpr16_vgpr17, 0, 0, 0, implicit $exec + $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 = V_WMMA_F32_16X16X32_F16_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 8, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, 0, 0, 0, 0, implicit $exec + $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 = V_SWMMAC_F32_16X16X128_FP8_FP8_w32_twoaddr killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, killed $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55, killed $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63, killed $vgpr16_vgpr17, 0, 0, 0, implicit $exec +... + +--- +name: test_wmma_f16_16x16x32_f16_D0_overlaps_A1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_wmma_f16_16x16x32_f16_D0_overlaps_A1 + ; GFX1250: early-clobber $vgpr22_vgpr23_vgpr24_vgpr25 = V_WMMA_F16_16X16X32_F16_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 8, killed $vgpr22_vgpr23_vgpr24_vgpr25, 0, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: early-clobber $vgpr38_vgpr39_vgpr40_vgpr41 = V_WMMA_F16_16X16X32_F16_w32_twoaddr 8, killed $vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29, 8, killed $vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37, 8, killed $vgpr38_vgpr39_vgpr40_vgpr41, 0, 0, 0, 0, implicit $exec + $vgpr22_vgpr23_vgpr24_vgpr25 = V_WMMA_F16_16X16X32_F16_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 8, killed $vgpr22_vgpr23_vgpr24_vgpr25, 0, 0, 0, 0, implicit $exec + $vgpr38_vgpr39_vgpr40_vgpr41 = V_WMMA_F16_16X16X32_F16_w32_twoaddr 8, killed $vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29, 8, killed $vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37, 8, killed $vgpr38_vgpr39_vgpr40_vgpr41, 0, 0, 0, 0, implicit $exec +... 
+ +--- +name: test_wmma_f16_16x16x32_f16_D0_overlaps_B1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_wmma_f16_16x16x32_f16_D0_overlaps_B1 + ; GFX1250: early-clobber $vgpr30_vgpr31_vgpr32_vgpr33 = V_WMMA_F16_16X16X32_F16_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 8, killed $vgpr30_vgpr31_vgpr32_vgpr33, 0, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: early-clobber $vgpr38_vgpr39_vgpr40_vgpr41 = V_WMMA_F16_16X16X32_F16_w32_twoaddr 8, killed $vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29, 8, killed $vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37, 8, killed $vgpr38_vgpr39_vgpr40_vgpr41, 0, 0, 0, 0, implicit $exec + $vgpr30_vgpr31_vgpr32_vgpr33 = V_WMMA_F16_16X16X32_F16_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 8, killed $vgpr30_vgpr31_vgpr32_vgpr33, 0, 0, 0, 0, implicit $exec + $vgpr38_vgpr39_vgpr40_vgpr41 = V_WMMA_F16_16X16X32_F16_w32_twoaddr 8, killed $vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29, 8, killed $vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37, 8, killed $vgpr38_vgpr39_vgpr40_vgpr41, 0, 0, 0, 0, implicit $exec +... + +--- +name: test_wmma_f16_16x16x32_f16_D0_overlaps_Index1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_wmma_f16_16x16x32_f16_D0_overlaps_Index1 + ; GFX1250: early-clobber $vgpr22_vgpr23_vgpr24_vgpr25 = V_WMMA_F16_16X16X32_F16_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 8, killed $vgpr22_vgpr23_vgpr24_vgpr25, 0, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: early-clobber $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 = V_SWMMAC_F32_16X16X128_FP8_FP8_w32_twoaddr killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, killed $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55, killed $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63, killed $vgpr22_vgpr23, 0, 0, 0, implicit $exec + $vgpr22_vgpr23_vgpr24_vgpr25 = V_WMMA_F16_16X16X32_F16_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 8, killed $vgpr22_vgpr23_vgpr24_vgpr25, 0, 0, 0, 0, implicit $exec + $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 = V_SWMMAC_F32_16X16X128_FP8_FP8_w32_twoaddr killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, killed $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55, killed $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63, killed $vgpr22_vgpr23, 0, 0, 0, implicit $exec +... 
+ +--- +name: test_wmma_F32_16x16x128_F8F6F4_F8_D0_overlaps_A1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_wmma_F32_16x16x128_F8F6F4_F8_D0_overlaps_A1 + ; GFX1250: early-clobber $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 = V_WMMA_F32_16X16X128_F8F6F4_f8_f8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, 8, killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, 0, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: early-clobber $vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71 = V_WMMA_F32_16X16X128_F8F6F4_f8_f8_w32_twoaddr killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47, killed $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63, 8, killed $vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71, 1, 2, 0, 0, implicit $exec + $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 = V_WMMA_F32_16X16X128_F8F6F4_f8_f8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, 8, killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, 0, 0, 0, 0, implicit $exec + $vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71 = V_WMMA_F32_16X16X128_F8F6F4_f8_f8_w32_twoaddr killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47, killed $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63, 8, killed $vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71, 1, 2, 0, 0, implicit $exec +... 
+ +--- +name: test_wmma_F32_16x16x128_F8F6F4_F8_D0_overlaps_B1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_wmma_F32_16x16x128_F8F6F4_F8_D0_overlaps_B1 + ; GFX1250: early-clobber $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 = V_WMMA_F32_16X16X128_F8F6F4_f8_f8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, 8, killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, 0, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: early-clobber $vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71 = V_WMMA_F32_16X16X128_F8F6F4_f8_f8_w32_twoaddr killed $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63, killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47, 8, killed $vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71, 1, 2, 0, 0, implicit $exec + $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 = V_WMMA_F32_16X16X128_F8F6F4_f8_f8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, 8, killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, 0, 0, 0, 0, implicit $exec + $vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71 = V_WMMA_F32_16X16X128_F8F6F4_f8_f8_w32_twoaddr killed $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63, killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47, 8, killed $vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71, 1, 2, 0, 0, implicit $exec +... 
+ +--- +name: test_wmma_F32_16x16x128_F8F6F4_F8_D0_overlaps_Index1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_wmma_F32_16x16x128_F8F6F4_F8_D0_overlaps_Index1 + ; GFX1250: early-clobber $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 = V_WMMA_F32_16X16X128_F8F6F4_f8_f8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, 8, killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, 0, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: early-clobber $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 = V_SWMMAC_F32_16X16X128_FP8_FP8_w32_twoaddr killed $vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71, killed $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55, killed $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63, killed $vgpr32_vgpr33, 0, 0, 0, implicit $exec + $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 = V_WMMA_F32_16X16X128_F8F6F4_f8_f8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, 8, killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, 0, 0, 0, 0, implicit $exec + $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 = V_SWMMAC_F32_16X16X128_FP8_FP8_w32_twoaddr killed $vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71, killed $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55, killed $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63, killed $vgpr32_vgpr33, 0, 0, 0, implicit $exec +... 
+ +--- +name: test_wmma_F32_16x16x128_F8F6F4_F6F4_D0_overlaps_A1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_wmma_F32_16x16x128_F8F6F4_F6F4_D0_overlaps_A1 + ; GFX1250: early-clobber $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 = V_WMMA_F32_16X16X128_F8F6F4_f8_f8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, 8, killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, 2, 2, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: early-clobber $vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71 = V_WMMA_F32_16X16X128_F8F6F4_f8_f8_w32_twoaddr killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47, killed $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63, 8, killed $vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71, 1, 2, 0, 0, implicit $exec + $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 = V_WMMA_F32_16X16X128_F8F6F4_f8_f8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, 8, killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, 2, 2, 0, 0, implicit $exec + $vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71 = V_WMMA_F32_16X16X128_F8F6F4_f8_f8_w32_twoaddr killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47, killed $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63, 8, killed $vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71, 1, 2, 0, 0, implicit $exec +... 
+ +--- +name: test_wmma_F32_16x16x128_F8F6F4_F6F4_D0_overlaps_B1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_wmma_F32_16x16x128_F8F6F4_F6F4_D0_overlaps_B1 + ; GFX1250: early-clobber $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 = V_WMMA_F32_16X16X128_F8F6F4_f8_f8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, 8, killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, 2, 2, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: early-clobber $vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71 = V_WMMA_F32_16X16X128_F8F6F4_f8_f8_w32_twoaddr killed $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63, killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47, 8, killed $vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71, 1, 2, 0, 0, implicit $exec + $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 = V_WMMA_F32_16X16X128_F8F6F4_f8_f8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, 8, killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, 2, 2, 0, 0, implicit $exec + $vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71 = V_WMMA_F32_16X16X128_F8F6F4_f8_f8_w32_twoaddr killed $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63, killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47, 8, killed $vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71, 1, 2, 0, 0, implicit $exec +... 
+ +--- +name: test_wmma_F32_16x16x128_F8F6F4_F6F4_D0_overlaps_Index1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_wmma_F32_16x16x128_F8F6F4_F6F4_D0_overlaps_Index1 + ; GFX1250: early-clobber $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 = V_WMMA_F32_16X16X128_F8F6F4_f8_f8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, 8, killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, 2, 2, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: early-clobber $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 = V_SWMMAC_F32_16X16X128_FP8_FP8_w32_twoaddr killed $vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71, killed $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55, killed $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63, killed $vgpr32_vgpr33, 0, 0, 0, implicit $exec + $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 = V_WMMA_F32_16X16X128_F8F6F4_f8_f8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, 8, killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, 2, 2, 0, 0, implicit $exec + $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 = V_SWMMAC_F32_16X16X128_FP8_FP8_w32_twoaddr killed $vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71, killed $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55, killed $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63, killed $vgpr32_vgpr33, 0, 0, 0, implicit $exec +... 
+ +--- +name: test_swmmac_f32_16x16x64_bf16_D0_overlaps_A1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_swmmac_f32_16x16x64_bf16_D0_overlaps_A1 + ; GFX1250: early-clobber $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 = V_SWMMAC_F32_16X16X64_BF16_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, killed $vgpr88, 0, 0, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: early-clobber $vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71 = V_SWMMAC_F32_16X16X64_BF16_w32_twoaddr 8, killed $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71, killed $vgpr88, 0, 0, 0, 0, 0, implicit $exec + $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 = V_SWMMAC_F32_16X16X64_BF16_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, killed $vgpr88, 0, 0, 0, 0, 0, implicit $exec + $vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71 = V_SWMMAC_F32_16X16X64_BF16_w32_twoaddr 8, killed $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71, killed $vgpr88, 0, 0, 0, 0, 0, implicit $exec +... 
+ +--- +name: test_swmmac_f32_16x16x64_bf16_D0_overlaps_B1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_swmmac_f32_16x16x64_bf16_D0_overlaps_B1 + ; GFX1250: early-clobber $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 = V_SWMMAC_F32_16X16X64_BF16_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, killed $vgpr88, 0, 0, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: early-clobber $vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71 = V_SWMMAC_F32_16X16X64_BF16_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, killed $vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71, killed $vgpr88, 0, 0, 0, 0, 0, implicit $exec + $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 = V_SWMMAC_F32_16X16X64_BF16_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, killed $vgpr88, 0, 0, 0, 0, 0, implicit $exec + $vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71 = V_SWMMAC_F32_16X16X64_BF16_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, killed $vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71, killed $vgpr88, 0, 0, 0, 0, 0, implicit $exec +... 
+ +--- +name: test_swmmac_f32_16x16x64_bf16_D0_overlaps_Index1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_swmmac_f32_16x16x64_bf16_D0_overlaps_Index1 + ; GFX1250: early-clobber $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 = V_SWMMAC_F32_16X16X64_BF16_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, killed $vgpr88, 0, 0, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: early-clobber $vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71 = V_SWMMAC_F32_16X16X64_BF16_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71, killed $vgpr24, 0, 0, 0, 0, 0, implicit $exec + $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 = V_SWMMAC_F32_16X16X64_BF16_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, killed $vgpr88, 0, 0, 0, 0, 0, implicit $exec + $vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71 = V_SWMMAC_F32_16X16X64_BF16_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71, killed $vgpr24, 0, 0, 0, 0, 0, implicit $exec +... 
+ +--- +name: test_swmmac_bf16_16x16x64_bf16_D0_overlaps_A1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_swmmac_bf16_16x16x64_bf16_D0_overlaps_A1 + ; GFX1250: early-clobber $vgpr24_vgpr25_vgpr26_vgpr27 = V_SWMMAC_BF16_16X16X64_BF16_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr24_vgpr25_vgpr26_vgpr27, killed $vgpr88, 0, 0, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: early-clobber $vgpr32_vgpr33_vgpr34_vgpr35 = V_SWMMAC_BF16_16X16X64_BF16_w32_twoaddr 8, killed $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr32_vgpr33_vgpr34_vgpr35, killed $vgpr88, 0, 0, 0, 0, 0, implicit $exec + $vgpr24_vgpr25_vgpr26_vgpr27 = V_SWMMAC_BF16_16X16X64_BF16_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr24_vgpr25_vgpr26_vgpr27, killed $vgpr88, 0, 0, 0, 0, 0, implicit $exec + $vgpr32_vgpr33_vgpr34_vgpr35 = V_SWMMAC_BF16_16X16X64_BF16_w32_twoaddr 8, killed $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr32_vgpr33_vgpr34_vgpr35, killed $vgpr88, 0, 0, 0, 0, 0, implicit $exec +... + +--- +name: test_swmmac_bf16_16x16x64_bf16_D0_overlaps_B1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_swmmac_bf16_16x16x64_bf16_D0_overlaps_B1 + ; GFX1250: early-clobber $vgpr24_vgpr25_vgpr26_vgpr27 = V_SWMMAC_BF16_16X16X64_BF16_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr24_vgpr25_vgpr26_vgpr27, killed $vgpr88, 0, 0, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: early-clobber $vgpr32_vgpr33_vgpr34_vgpr35 = V_SWMMAC_BF16_16X16X64_BF16_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27, killed $vgpr32_vgpr33_vgpr34_vgpr35, killed $vgpr88, 0, 0, 0, 0, 0, implicit $exec + $vgpr24_vgpr25_vgpr26_vgpr27 = V_SWMMAC_BF16_16X16X64_BF16_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr24_vgpr25_vgpr26_vgpr27, killed $vgpr88, 0, 0, 0, 0, 0, implicit $exec + $vgpr32_vgpr33_vgpr34_vgpr35 = V_SWMMAC_BF16_16X16X64_BF16_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27, killed $vgpr32_vgpr33_vgpr34_vgpr35, killed $vgpr88, 0, 0, 0, 0, 0, implicit $exec +... 
+
+---
+name: test_swmmac_bf16_16x16x64_bf16_D0_overlaps_Index1
+body: |
+  bb.0:
+    ; GFX1250-LABEL: name: test_swmmac_bf16_16x16x64_bf16_D0_overlaps_Index1
+    ; GFX1250: early-clobber $vgpr24_vgpr25_vgpr26_vgpr27 = V_SWMMAC_BF16_16X16X64_BF16_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr24_vgpr25_vgpr26_vgpr27, killed $vgpr88, 0, 0, 0, 0, 0, implicit $exec
+    ; GFX1250-NEXT: V_NOP_e32 implicit $exec
+    ; GFX1250-NEXT: V_NOP_e32 implicit $exec
+    ; GFX1250-NEXT: V_NOP_e32 implicit $exec
+    ; GFX1250-NEXT: early-clobber $vgpr32_vgpr33_vgpr34_vgpr35 = V_SWMMAC_BF16_16X16X64_BF16_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr32_vgpr33_vgpr34_vgpr35, killed $vgpr24, 0, 0, 0, 0, 0, implicit $exec
+    $vgpr24_vgpr25_vgpr26_vgpr27 = V_SWMMAC_BF16_16X16X64_BF16_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr24_vgpr25_vgpr26_vgpr27, killed $vgpr88, 0, 0, 0, 0, 0, implicit $exec
+    $vgpr32_vgpr33_vgpr34_vgpr35 = V_SWMMAC_BF16_16X16X64_BF16_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr32_vgpr33_vgpr34_vgpr35, killed $vgpr24, 0, 0, 0, 0, 0, implicit $exec
+...
+
+---
+name: test_swmmac_bf16f32_16x16x64_bf16_D0_overlaps_A1
+body: |
+  bb.0:
+    ; GFX1250-LABEL: name: test_swmmac_bf16f32_16x16x64_bf16_D0_overlaps_A1
+    ; GFX1250: early-clobber $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 = V_SWMMAC_BF16F32_16X16X64_BF16_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, killed $vgpr88, 0, 0, 0, 0, 0, implicit $exec
+    ; GFX1250-NEXT: V_NOP_e32 implicit $exec
+    ; GFX1250-NEXT: V_NOP_e32 implicit $exec
+    ; GFX1250-NEXT: V_NOP_e32 implicit $exec
+    ; GFX1250-NEXT: early-clobber $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55 = V_SWMMAC_BF16F32_16X16X64_BF16_w32_twoaddr 8, killed $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55, killed $vgpr88, 0, 0, 0, 0, 0, implicit $exec
+    $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 = V_SWMMAC_BF16F32_16X16X64_BF16_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, killed $vgpr88, 0, 0, 0, 0, 0, implicit $exec
+    $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55 = V_SWMMAC_BF16F32_16X16X64_BF16_w32_twoaddr 8, killed $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55, killed $vgpr88, 0, 0, 0, 0, 0, implicit $exec
+...
+
+---
+name: test_swmmac_bf16f32_16x16x64_bf16_D0_overlaps_B1
+body: |
+  bb.0:
+    ; GFX1250-LABEL: name: test_swmmac_bf16f32_16x16x64_bf16_D0_overlaps_B1
+    ; GFX1250: early-clobber $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 = V_SWMMAC_BF16F32_16X16X64_BF16_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, killed $vgpr88, 0, 0, 0, 0, 0, implicit $exec
+    ; GFX1250-NEXT: V_NOP_e32 implicit $exec
+    ; GFX1250-NEXT: V_NOP_e32 implicit $exec
+    ; GFX1250-NEXT: V_NOP_e32 implicit $exec
+    ; GFX1250-NEXT: early-clobber $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55 = V_SWMMAC_BF16F32_16X16X64_BF16_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, killed $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55, killed $vgpr88, 0, 0, 0, 0, 0, implicit $exec
+    $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 = V_SWMMAC_BF16F32_16X16X64_BF16_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, killed $vgpr88, 0, 0, 0, 0, 0, implicit $exec
+    $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55 = V_SWMMAC_BF16F32_16X16X64_BF16_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, killed $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55, killed $vgpr88, 0, 0, 0, 0, 0, implicit $exec
+...
+ +--- +name: test_swmmac_bf16f32_16x16x64_bf16_D0_overlaps_Index1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_swmmac_bf16f32_16x16x64_bf16_D0_overlaps_Index1 + ; GFX1250: early-clobber $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 = V_SWMMAC_BF16F32_16X16X64_BF16_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, killed $vgpr88, 0, 0, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: early-clobber $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55 = V_SWMMAC_BF16F32_16X16X64_BF16_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55, killed $vgpr24, 0, 0, 0, 0, 0, implicit $exec + $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 = V_SWMMAC_BF16F32_16X16X64_BF16_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, killed $vgpr88, 0, 0, 0, 0, 0, implicit $exec + $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55 = V_SWMMAC_BF16F32_16X16X64_BF16_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55, killed $vgpr24, 0, 0, 0, 0, 0, implicit $exec +... 
+ +--- +name: test_swmmac_f32_16x16x128_fp8_fp8_D0_overlaps_A1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_swmmac_f32_16x16x128_fp8_fp8_D0_overlaps_A1 + ; GFX1250: early-clobber $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 = V_SWMMAC_F32_16X16X128_FP8_FP8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, killed $vgpr88_vgpr89, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: early-clobber $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 = V_SWMMAC_F32_16X16X128_FP8_FP8_w32_twoaddr killed $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, killed $vgpr90_vgpr91, 0, 0, 0, implicit $exec + $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 = V_SWMMAC_F32_16X16X128_FP8_FP8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, killed $vgpr88_vgpr89, 0, 0, 0, implicit $exec + $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 = V_SWMMAC_F32_16X16X128_FP8_FP8_w32_twoaddr killed $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, killed $vgpr90_vgpr91, 0, 0, 0, implicit $exec +... 
+ +--- +name: test_swmmac_f32_16x16x128_fp8_fp8_D0_overlaps_B1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_swmmac_f32_16x16x128_fp8_fp8_D0_overlaps_B1 + ; GFX1250: early-clobber $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 = V_SWMMAC_F32_16X16X128_FP8_FP8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, killed $vgpr88_vgpr89, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: early-clobber $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 = V_SWMMAC_F32_16X16X128_FP8_FP8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, killed $vgpr90_vgpr91, 0, 0, 0, implicit $exec + $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 = V_SWMMAC_F32_16X16X128_FP8_FP8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, killed $vgpr88_vgpr89, 0, 0, 0, implicit $exec + $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 = V_SWMMAC_F32_16X16X128_FP8_FP8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, killed $vgpr90_vgpr91, 0, 0, 0, implicit $exec +... 
+ +--- +name: test_swmmac_f32_16x16x128_fp8_fp8_D0_overlaps_Index1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_swmmac_f32_16x16x128_fp8_fp8_D0_overlaps_Index1 + ; GFX1250: early-clobber $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 = V_SWMMAC_F32_16X16X128_FP8_FP8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, killed $vgpr88_vgpr89, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: early-clobber $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 = V_SWMMAC_F32_16X16X128_FP8_FP8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, killed $vgpr24_vgpr25, 0, 0, 0, implicit $exec + $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 = V_SWMMAC_F32_16X16X128_FP8_FP8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, killed $vgpr88_vgpr89, 0, 0, 0, implicit $exec + $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 = V_SWMMAC_F32_16X16X128_FP8_FP8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, killed $vgpr24_vgpr25, 0, 0, 0, implicit $exec +... 
+ +--- +name: test_swmmac_f32_16x16x128_fp8_bf8_D0_overlaps_A1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_swmmac_f32_16x16x128_fp8_bf8_D0_overlaps_A1 + ; GFX1250: early-clobber $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 = V_SWMMAC_F32_16X16X128_FP8_BF8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, killed $vgpr88_vgpr89, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: early-clobber $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 = V_SWMMAC_F32_16X16X128_FP8_BF8_w32_twoaddr killed $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, killed $vgpr90_vgpr91, 0, 0, 0, implicit $exec + $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 = V_SWMMAC_F32_16X16X128_FP8_BF8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, killed $vgpr88_vgpr89, 0, 0, 0, implicit $exec + $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 = V_SWMMAC_F32_16X16X128_FP8_BF8_w32_twoaddr killed $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, killed $vgpr90_vgpr91, 0, 0, 0, implicit $exec +... 
+ +--- +name: test_swmmac_f32_16x16x128_fp8_bf8_D0_overlaps_B1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_swmmac_f32_16x16x128_fp8_bf8_D0_overlaps_B1 + ; GFX1250: early-clobber $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 = V_SWMMAC_F32_16X16X128_FP8_BF8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, killed $vgpr88_vgpr89, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: early-clobber $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 = V_SWMMAC_F32_16X16X128_FP8_BF8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, killed $vgpr90_vgpr91, 0, 0, 0, implicit $exec + $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 = V_SWMMAC_F32_16X16X128_FP8_BF8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, killed $vgpr88_vgpr89, 0, 0, 0, implicit $exec + $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 = V_SWMMAC_F32_16X16X128_FP8_BF8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, killed $vgpr90_vgpr91, 0, 0, 0, implicit $exec +... 
+ +--- +name: test_swmmac_f32_16x16x128_fp8_bf8_D0_overlaps_Index1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_swmmac_f32_16x16x128_fp8_bf8_D0_overlaps_Index1 + ; GFX1250: early-clobber $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 = V_SWMMAC_F32_16X16X128_FP8_BF8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, killed $vgpr88_vgpr89, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: early-clobber $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 = V_SWMMAC_F32_16X16X128_FP8_BF8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, killed $vgpr24_vgpr25, 0, 0, 0, implicit $exec + $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 = V_SWMMAC_F32_16X16X128_FP8_BF8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, killed $vgpr88_vgpr89, 0, 0, 0, implicit $exec + $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 = V_SWMMAC_F32_16X16X128_FP8_BF8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, killed $vgpr24_vgpr25, 0, 0, 0, implicit $exec +... 
+ +--- +name: test_swmmac_f32_16x16x128_bf8_fp8_D0_overlaps_A1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_swmmac_f32_16x16x128_bf8_fp8_D0_overlaps_A1 + ; GFX1250: early-clobber $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 = V_SWMMAC_F32_16X16X128_BF8_FP8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, killed $vgpr88_vgpr89, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: early-clobber $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 = V_SWMMAC_F32_16X16X128_BF8_FP8_w32_twoaddr killed $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, killed $vgpr90_vgpr91, 0, 0, 0, implicit $exec + $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 = V_SWMMAC_F32_16X16X128_BF8_FP8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, killed $vgpr88_vgpr89, 0, 0, 0, implicit $exec + $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 = V_SWMMAC_F32_16X16X128_BF8_FP8_w32_twoaddr killed $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, killed $vgpr90_vgpr91, 0, 0, 0, implicit $exec +... 
+ +--- +name: test_swmmac_f32_16x16x128_bf8_fp8_D0_overlaps_B1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_swmmac_f32_16x16x128_bf8_fp8_D0_overlaps_B1 + ; GFX1250: early-clobber $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 = V_SWMMAC_F32_16X16X128_BF8_FP8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, killed $vgpr88_vgpr89, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: early-clobber $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 = V_SWMMAC_F32_16X16X128_BF8_FP8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, killed $vgpr90_vgpr91, 0, 0, 0, implicit $exec + $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 = V_SWMMAC_F32_16X16X128_BF8_FP8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, killed $vgpr88_vgpr89, 0, 0, 0, implicit $exec + $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 = V_SWMMAC_F32_16X16X128_BF8_FP8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, killed $vgpr90_vgpr91, 0, 0, 0, implicit $exec +... 
+ +--- +name: test_swmmac_f32_16x16x128_bf8_fp8_D0_overlaps_Index1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_swmmac_f32_16x16x128_bf8_fp8_D0_overlaps_Index1 + ; GFX1250: early-clobber $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 = V_SWMMAC_F32_16X16X128_BF8_FP8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, killed $vgpr88_vgpr89, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: early-clobber $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 = V_SWMMAC_F32_16X16X128_BF8_FP8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, killed $vgpr24_vgpr25, 0, 0, 0, implicit $exec + $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 = V_SWMMAC_F32_16X16X128_BF8_FP8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, killed $vgpr88_vgpr89, 0, 0, 0, implicit $exec + $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 = V_SWMMAC_F32_16X16X128_BF8_FP8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, killed $vgpr24_vgpr25, 0, 0, 0, implicit $exec +... 
+ +--- +name: test_swmmac_f32_16x16x128_bf8_bf8_D0_overlaps_A1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_swmmac_f32_16x16x128_bf8_bf8_D0_overlaps_A1 + ; GFX1250: early-clobber $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 = V_SWMMAC_F32_16X16X128_BF8_BF8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, killed $vgpr88_vgpr89, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: early-clobber $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 = V_SWMMAC_F32_16X16X128_BF8_BF8_w32_twoaddr killed $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, killed $vgpr90_vgpr91, 0, 0, 0, implicit $exec + $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 = V_SWMMAC_F32_16X16X128_BF8_BF8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, killed $vgpr88_vgpr89, 0, 0, 0, implicit $exec + $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 = V_SWMMAC_F32_16X16X128_BF8_BF8_w32_twoaddr killed $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, killed $vgpr90_vgpr91, 0, 0, 0, implicit $exec +... 
+ +--- +name: test_swmmac_f32_16x16x128_bf8_bf8_D0_overlaps_B1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_swmmac_f32_16x16x128_bf8_bf8_D0_overlaps_B1 + ; GFX1250: early-clobber $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 = V_SWMMAC_F32_16X16X128_BF8_BF8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, killed $vgpr88_vgpr89, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: early-clobber $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 = V_SWMMAC_F32_16X16X128_BF8_BF8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, killed $vgpr90_vgpr91, 0, 0, 0, implicit $exec + $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 = V_SWMMAC_F32_16X16X128_BF8_BF8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, killed $vgpr88_vgpr89, 0, 0, 0, implicit $exec + $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 = V_SWMMAC_F32_16X16X128_BF8_BF8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, killed $vgpr90_vgpr91, 0, 0, 0, implicit $exec +... 
+ +--- +name: test_swmmac_f32_16x16x128_bf8_bf8_D0_overlaps_Index1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_swmmac_f32_16x16x128_bf8_bf8_D0_overlaps_Index1 + ; GFX1250: early-clobber $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 = V_SWMMAC_F32_16X16X128_BF8_BF8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, killed $vgpr88_vgpr89, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: early-clobber $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 = V_SWMMAC_F32_16X16X128_BF8_BF8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, killed $vgpr24_vgpr25, 0, 0, 0, implicit $exec + $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 = V_SWMMAC_F32_16X16X128_BF8_BF8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, killed $vgpr88_vgpr89, 0, 0, 0, implicit $exec + $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 = V_SWMMAC_F32_16X16X128_BF8_BF8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, killed $vgpr24_vgpr25, 0, 0, 0, implicit $exec +... 
+ +--- +name: test_swmmac_f16_16x16x128_fp8_fp8_D0_overlaps_A1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_swmmac_f16_16x16x128_fp8_fp8_D0_overlaps_A1 + ; GFX1250: early-clobber $vgpr24_vgpr25_vgpr26_vgpr27 = V_SWMMAC_F16_16X16X128_FP8_FP8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr24_vgpr25_vgpr26_vgpr27, killed $vgpr88_vgpr89, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: early-clobber $vgpr48_vgpr49_vgpr50_vgpr51 = V_SWMMAC_F16_16X16X128_FP8_FP8_w32_twoaddr killed $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47, killed $vgpr48_vgpr49_vgpr50_vgpr51, killed $vgpr90_vgpr91, 0, 0, 0, implicit $exec + $vgpr24_vgpr25_vgpr26_vgpr27 = V_SWMMAC_F16_16X16X128_FP8_FP8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr24_vgpr25_vgpr26_vgpr27, killed $vgpr88_vgpr89, 0, 0, 0, implicit $exec + $vgpr48_vgpr49_vgpr50_vgpr51 = V_SWMMAC_F16_16X16X128_FP8_FP8_w32_twoaddr killed $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47, killed $vgpr48_vgpr49_vgpr50_vgpr51, killed $vgpr90_vgpr91, 0, 0, 0, implicit $exec +... + +--- +name: test_swmmac_f16_16x16x128_fp8_fp8_D0_overlaps_B1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_swmmac_f16_16x16x128_fp8_fp8_D0_overlaps_B1 + ; GFX1250: early-clobber $vgpr24_vgpr25_vgpr26_vgpr27 = V_SWMMAC_F16_16X16X128_FP8_FP8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr24_vgpr25_vgpr26_vgpr27, killed $vgpr88_vgpr89, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: early-clobber $vgpr48_vgpr49_vgpr50_vgpr51 = V_SWMMAC_F16_16X16X128_FP8_FP8_w32_twoaddr killed $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47, killed $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, killed $vgpr48_vgpr49_vgpr50_vgpr51, killed $vgpr90_vgpr91, 0, 0, 0, implicit $exec + $vgpr24_vgpr25_vgpr26_vgpr27 = V_SWMMAC_F16_16X16X128_FP8_FP8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr24_vgpr25_vgpr26_vgpr27, killed $vgpr88_vgpr89, 0, 0, 0, implicit $exec + + $vgpr48_vgpr49_vgpr50_vgpr51 = V_SWMMAC_F16_16X16X128_FP8_FP8_w32_twoaddr killed $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47, killed $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, killed $vgpr48_vgpr49_vgpr50_vgpr51, killed $vgpr90_vgpr91, 0, 0, 0, implicit $exec +... 
+ +--- +name: test_swmmac_f16_16x16x128_fp8_fp8_D0_overlaps_Index1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_swmmac_f16_16x16x128_fp8_fp8_D0_overlaps_Index1 + ; GFX1250: early-clobber $vgpr24_vgpr25_vgpr26_vgpr27 = V_SWMMAC_F16_16X16X128_FP8_FP8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr24_vgpr25_vgpr26_vgpr27, killed $vgpr88_vgpr89, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: early-clobber $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 = V_SWMMAC_F32_16X16X128_FP8_FP8_w32_twoaddr killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, killed $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55, killed $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63, killed $vgpr24_vgpr25, 0, 0, 0, implicit $exec + $vgpr24_vgpr25_vgpr26_vgpr27 = V_SWMMAC_F16_16X16X128_FP8_FP8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr24_vgpr25_vgpr26_vgpr27, killed $vgpr88_vgpr89, 0, 0, 0, implicit $exec + $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 = V_SWMMAC_F32_16X16X128_FP8_FP8_w32_twoaddr killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, killed $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55, killed $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63, killed $vgpr24_vgpr25, 0, 0, 0, implicit $exec +... + +--- +name: test_swmmac_f16_16x16x128_fp8_bf8_D0_overlaps_A1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_swmmac_f16_16x16x128_fp8_bf8_D0_overlaps_A1 + ; GFX1250: early-clobber $vgpr24_vgpr25_vgpr26_vgpr27 = V_SWMMAC_F16_16X16X128_FP8_BF8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr24_vgpr25_vgpr26_vgpr27, killed $vgpr88_vgpr89, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: early-clobber $vgpr48_vgpr49_vgpr50_vgpr51 = V_SWMMAC_F16_16X16X128_FP8_BF8_w32_twoaddr killed $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47, killed $vgpr48_vgpr49_vgpr50_vgpr51, killed $vgpr90_vgpr91, 0, 0, 0, implicit $exec + $vgpr24_vgpr25_vgpr26_vgpr27 = V_SWMMAC_F16_16X16X128_FP8_BF8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr24_vgpr25_vgpr26_vgpr27, killed $vgpr88_vgpr89, 0, 0, 0, implicit $exec + $vgpr48_vgpr49_vgpr50_vgpr51 = V_SWMMAC_F16_16X16X128_FP8_BF8_w32_twoaddr killed $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47, killed $vgpr48_vgpr49_vgpr50_vgpr51, killed $vgpr90_vgpr91, 0, 0, 0, implicit $exec +... 
+ +--- +name: test_swmmac_f16_16x16x128_fp8_bf8_D0_overlaps_B1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_swmmac_f16_16x16x128_fp8_bf8_D0_overlaps_B1 + ; GFX1250: early-clobber $vgpr24_vgpr25_vgpr26_vgpr27 = V_SWMMAC_F16_16X16X128_FP8_BF8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr24_vgpr25_vgpr26_vgpr27, killed $vgpr88_vgpr89, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: early-clobber $vgpr48_vgpr49_vgpr50_vgpr51 = V_SWMMAC_F16_16X16X128_FP8_BF8_w32_twoaddr killed $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47, killed $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, killed $vgpr48_vgpr49_vgpr50_vgpr51, killed $vgpr90_vgpr91, 0, 0, 0, implicit $exec + $vgpr24_vgpr25_vgpr26_vgpr27 = V_SWMMAC_F16_16X16X128_FP8_BF8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr24_vgpr25_vgpr26_vgpr27, killed $vgpr88_vgpr89, 0, 0, 0, implicit $exec + + $vgpr48_vgpr49_vgpr50_vgpr51 = V_SWMMAC_F16_16X16X128_FP8_BF8_w32_twoaddr killed $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47, killed $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, killed $vgpr48_vgpr49_vgpr50_vgpr51, killed $vgpr90_vgpr91, 0, 0, 0, implicit $exec +... + +--- +name: test_swmmac_f16_16x16x128_fp8_bf8_D0_overlaps_Index1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_swmmac_f16_16x16x128_fp8_bf8_D0_overlaps_Index1 + ; GFX1250: early-clobber $vgpr24_vgpr25_vgpr26_vgpr27 = V_SWMMAC_F16_16X16X128_FP8_BF8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr24_vgpr25_vgpr26_vgpr27, killed $vgpr88_vgpr89, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: early-clobber $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 = V_SWMMAC_F32_16X16X128_FP8_BF8_w32_twoaddr killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, killed $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55, killed $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63, killed $vgpr24_vgpr25, 0, 0, 0, implicit $exec + $vgpr24_vgpr25_vgpr26_vgpr27 = V_SWMMAC_F16_16X16X128_FP8_BF8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr24_vgpr25_vgpr26_vgpr27, killed $vgpr88_vgpr89, 0, 0, 0, implicit $exec + $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 = V_SWMMAC_F32_16X16X128_FP8_BF8_w32_twoaddr killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, killed $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55, killed $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63, killed $vgpr24_vgpr25, 0, 0, 0, implicit $exec +... 
+ +--- +name: test_swmmac_f16_16x16x128_bf8_fp8_D0_overlaps_A1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_swmmac_f16_16x16x128_bf8_fp8_D0_overlaps_A1 + ; GFX1250: early-clobber $vgpr24_vgpr25_vgpr26_vgpr27 = V_SWMMAC_F16_16X16X128_BF8_FP8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr24_vgpr25_vgpr26_vgpr27, killed $vgpr88_vgpr89, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: early-clobber $vgpr48_vgpr49_vgpr50_vgpr51 = V_SWMMAC_F16_16X16X128_BF8_FP8_w32_twoaddr killed $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47, killed $vgpr48_vgpr49_vgpr50_vgpr51, killed $vgpr90_vgpr91, 0, 0, 0, implicit $exec + $vgpr24_vgpr25_vgpr26_vgpr27 = V_SWMMAC_F16_16X16X128_BF8_FP8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr24_vgpr25_vgpr26_vgpr27, killed $vgpr88_vgpr89, 0, 0, 0, implicit $exec + $vgpr48_vgpr49_vgpr50_vgpr51 = V_SWMMAC_F16_16X16X128_BF8_FP8_w32_twoaddr killed $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47, killed $vgpr48_vgpr49_vgpr50_vgpr51, killed $vgpr90_vgpr91, 0, 0, 0, implicit $exec +... + +--- +name: test_swmmac_f16_16x16x128_bf8_fp8_D0_overlaps_B1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_swmmac_f16_16x16x128_bf8_fp8_D0_overlaps_B1 + ; GFX1250: early-clobber $vgpr24_vgpr25_vgpr26_vgpr27 = V_SWMMAC_F16_16X16X128_BF8_FP8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr24_vgpr25_vgpr26_vgpr27, killed $vgpr88_vgpr89, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: early-clobber $vgpr48_vgpr49_vgpr50_vgpr51 = V_SWMMAC_F16_16X16X128_BF8_FP8_w32_twoaddr killed $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47, killed $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, killed $vgpr48_vgpr49_vgpr50_vgpr51, killed $vgpr90_vgpr91, 0, 0, 0, implicit $exec + $vgpr24_vgpr25_vgpr26_vgpr27 = V_SWMMAC_F16_16X16X128_BF8_FP8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr24_vgpr25_vgpr26_vgpr27, killed $vgpr88_vgpr89, 0, 0, 0, implicit $exec + $vgpr48_vgpr49_vgpr50_vgpr51 = V_SWMMAC_F16_16X16X128_BF8_FP8_w32_twoaddr killed $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47, killed $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, killed $vgpr48_vgpr49_vgpr50_vgpr51, killed $vgpr90_vgpr91, 0, 0, 0, implicit $exec +... 
+ +--- +name: test_swmmac_f16_16x16x128_bf8_fp8_D0_overlaps_Index1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_swmmac_f16_16x16x128_bf8_fp8_D0_overlaps_Index1 + ; GFX1250: early-clobber $vgpr24_vgpr25_vgpr26_vgpr27 = V_SWMMAC_F16_16X16X128_BF8_FP8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr24_vgpr25_vgpr26_vgpr27, killed $vgpr88_vgpr89, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: early-clobber $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 = V_SWMMAC_F32_16X16X128_BF8_FP8_w32_twoaddr killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, killed $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55, killed $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63, killed $vgpr24_vgpr25, 0, 0, 0, implicit $exec + $vgpr24_vgpr25_vgpr26_vgpr27 = V_SWMMAC_F16_16X16X128_BF8_FP8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr24_vgpr25_vgpr26_vgpr27, killed $vgpr88_vgpr89, 0, 0, 0, implicit $exec + $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 = V_SWMMAC_F32_16X16X128_BF8_FP8_w32_twoaddr killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, killed $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55, killed $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63, killed $vgpr24_vgpr25, 0, 0, 0, implicit $exec +... + +--- +name: test_swmmac_f16_16x16x128_bf8_bf8_D0_overlaps_A1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_swmmac_f16_16x16x128_bf8_bf8_D0_overlaps_A1 + ; GFX1250: early-clobber $vgpr24_vgpr25_vgpr26_vgpr27 = V_SWMMAC_F16_16X16X128_BF8_BF8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr24_vgpr25_vgpr26_vgpr27, killed $vgpr88_vgpr89, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: early-clobber $vgpr48_vgpr49_vgpr50_vgpr51 = V_SWMMAC_F16_16X16X128_BF8_BF8_w32_twoaddr killed $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47, killed $vgpr48_vgpr49_vgpr50_vgpr51, killed $vgpr90_vgpr91, 0, 0, 0, implicit $exec + $vgpr24_vgpr25_vgpr26_vgpr27 = V_SWMMAC_F16_16X16X128_BF8_BF8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr24_vgpr25_vgpr26_vgpr27, killed $vgpr88_vgpr89, 0, 0, 0, implicit $exec + $vgpr48_vgpr49_vgpr50_vgpr51 = V_SWMMAC_F16_16X16X128_BF8_BF8_w32_twoaddr killed $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47, killed $vgpr48_vgpr49_vgpr50_vgpr51, killed $vgpr90_vgpr91, 0, 0, 0, implicit $exec +... 
+ +--- +name: test_swmmac_f16_16x16x128_bf8_bf8_D0_overlaps_B1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_swmmac_f16_16x16x128_bf8_bf8_D0_overlaps_B1 + ; GFX1250: early-clobber $vgpr24_vgpr25_vgpr26_vgpr27 = V_SWMMAC_F16_16X16X128_BF8_BF8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr24_vgpr25_vgpr26_vgpr27, killed $vgpr88_vgpr89, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: early-clobber $vgpr48_vgpr49_vgpr50_vgpr51 = V_SWMMAC_F16_16X16X128_BF8_BF8_w32_twoaddr killed $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47, killed $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, killed $vgpr48_vgpr49_vgpr50_vgpr51, killed $vgpr90_vgpr91, 0, 0, 0, implicit $exec + $vgpr24_vgpr25_vgpr26_vgpr27 = V_SWMMAC_F16_16X16X128_BF8_BF8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr24_vgpr25_vgpr26_vgpr27, killed $vgpr88_vgpr89, 0, 0, 0, implicit $exec + $vgpr48_vgpr49_vgpr50_vgpr51 = V_SWMMAC_F16_16X16X128_BF8_BF8_w32_twoaddr killed $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47, killed $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, killed $vgpr48_vgpr49_vgpr50_vgpr51, killed $vgpr90_vgpr91, 0, 0, 0, implicit $exec +... + +--- +name: test_swmmac_f16_16x16x128_bf8_bf8_D0_overlaps_Index1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_swmmac_f16_16x16x128_bf8_bf8_D0_overlaps_Index1 + ; GFX1250: early-clobber $vgpr24_vgpr25_vgpr26_vgpr27 = V_SWMMAC_F16_16X16X128_BF8_BF8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr24_vgpr25_vgpr26_vgpr27, killed $vgpr88_vgpr89, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: early-clobber $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 = V_SWMMAC_F32_16X16X128_BF8_BF8_w32_twoaddr killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, killed $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55, killed $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63, killed $vgpr24_vgpr25, 0, 0, 0, implicit $exec + $vgpr24_vgpr25_vgpr26_vgpr27 = V_SWMMAC_F16_16X16X128_BF8_BF8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr24_vgpr25_vgpr26_vgpr27, killed $vgpr88_vgpr89, 0, 0, 0, implicit $exec + $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 = V_SWMMAC_F32_16X16X128_BF8_BF8_w32_twoaddr killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, killed $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55, killed $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63, killed $vgpr24_vgpr25, 0, 0, 0, implicit $exec +... 
+ +--- +name: test_swmmac_i32_16x16x128_iu8_D0_overlaps_A1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_swmmac_i32_16x16x128_iu8_D0_overlaps_A1 + ; GFX1250: early-clobber $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 = V_SWMMAC_I32_16X16X128_IU8_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, killed $vgpr88_vgpr89, 0, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: early-clobber $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55 = V_SWMMAC_I32_16X16X128_IU8_w32_twoaddr 8, killed $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, 8, killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47, killed $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55, killed $vgpr90_vgpr91, 0, 0, 0, 0, implicit $exec + $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 = V_SWMMAC_I32_16X16X128_IU8_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, killed $vgpr88_vgpr89, 0, 0, 0, 0, implicit $exec + $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55 = V_SWMMAC_I32_16X16X128_IU8_w32_twoaddr 8, killed $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, 8, killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47, killed $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55, killed $vgpr90_vgpr91, 0, 0, 0, 0, implicit $exec +... 
+ +--- +name: test_swmmac_i32_16x16x128_iu8_D0_overlaps_B1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_swmmac_i32_16x16x128_iu8_D0_overlaps_B1 + ; GFX1250: early-clobber $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 = V_SWMMAC_I32_16X16X128_IU8_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, killed $vgpr88_vgpr89, 0, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: early-clobber $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55 = V_SWMMAC_I32_16X16X128_IU8_w32_twoaddr 8, killed $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47, 8, killed $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, killed $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55, killed $vgpr90_vgpr91, 0, 0, 0, 0, implicit $exec + $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 = V_SWMMAC_I32_16X16X128_IU8_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, killed $vgpr88_vgpr89, 0, 0, 0, 0, implicit $exec + $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55 = V_SWMMAC_I32_16X16X128_IU8_w32_twoaddr 8, killed $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47, 8, killed $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, killed $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55, killed $vgpr90_vgpr91, 0, 0, 0, 0, implicit $exec +... 
+ +--- +name: test_swmmac_i32_16x16x128_iu8_D0_overlaps_Index1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_swmmac_i32_16x16x128_iu8_D0_overlaps_Index1 + ; GFX1250: early-clobber $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 = V_SWMMAC_I32_16X16X128_IU8_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, killed $vgpr88_vgpr89, 0, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: early-clobber $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 = V_SWMMAC_F32_16X16X128_FP8_FP8_w32_twoaddr killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, killed $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55, killed $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63, killed $vgpr24_vgpr25, 0, 0, 0, implicit $exec + $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 = V_SWMMAC_I32_16X16X128_IU8_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, killed $vgpr88_vgpr89, 0, 0, 0, 0, implicit $exec + $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 = V_SWMMAC_F32_16X16X128_FP8_FP8_w32_twoaddr killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, killed $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55, killed $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63, killed $vgpr24_vgpr25, 0, 0, 0, implicit $exec +... 
+ +--- +name: test_swmmac_f32_16x16x64_f16_D0_overlaps_A1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_swmmac_f32_16x16x64_f16_D0_overlaps_A1 + ; GFX1250: early-clobber $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 = V_SWMMAC_F32_16X16X64_F16_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, killed $vgpr88, 0, 0, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: early-clobber $vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71 = V_SWMMAC_F32_16X16X64_F16_w32_twoaddr 8, killed $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71, killed $vgpr88, 0, 0, 0, 0, 0, implicit $exec + $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 = V_SWMMAC_F32_16X16X64_F16_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, killed $vgpr88, 0, 0, 0, 0, 0, implicit $exec + $vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71 = V_SWMMAC_F32_16X16X64_F16_w32_twoaddr 8, killed $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71, killed $vgpr88, 0, 0, 0, 0, 0, implicit $exec +... 
+ +--- +name: test_swmmac_f32_16x16x64_f16_D0_overlaps_B1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_swmmac_f32_16x16x64_f16_D0_overlaps_B1 + ; GFX1250: early-clobber $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 = V_SWMMAC_F32_16X16X64_F16_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, killed $vgpr88, 0, 0, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: early-clobber $vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71 = V_SWMMAC_F32_16X16X64_F16_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, killed $vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71, killed $vgpr88, 0, 0, 0, 0, 0, implicit $exec + $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 = V_SWMMAC_F32_16X16X64_F16_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, killed $vgpr88, 0, 0, 0, 0, 0, implicit $exec + $vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71 = V_SWMMAC_F32_16X16X64_F16_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, killed $vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71, killed $vgpr88, 0, 0, 0, 0, 0, implicit $exec +... 
+ +--- +name: test_swmmac_f32_16x16x64_f16_D0_overlaps_Index1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_swmmac_f32_16x16x64_f16_D0_overlaps_Index1 + ; GFX1250: early-clobber $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 = V_SWMMAC_F32_16X16X64_F16_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, killed $vgpr88, 0, 0, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: early-clobber $vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71 = V_SWMMAC_F32_16X16X64_F16_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71, killed $vgpr24, 0, 0, 0, 0, 0, implicit $exec + $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 = V_SWMMAC_F32_16X16X64_F16_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, killed $vgpr88, 0, 0, 0, 0, 0, implicit $exec + $vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71 = V_SWMMAC_F32_16X16X64_F16_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71, killed $vgpr24, 0, 0, 0, 0, 0, implicit $exec +... 
+ +--- +name: test_swmmac_f16_16x16x64_f16_D0_overlaps_A1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_swmmac_f16_16x16x64_f16_D0_overlaps_A1 + ; GFX1250: early-clobber $vgpr24_vgpr25_vgpr26_vgpr27 = V_SWMMAC_F16_16X16X64_F16_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr24_vgpr25_vgpr26_vgpr27, killed $vgpr88, 0, 0, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: early-clobber $vgpr64_vgpr65_vgpr66_vgpr67 = V_SWMMAC_F16_16X16X64_F16_w32_twoaddr 8, killed $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr64_vgpr65_vgpr66_vgpr67, killed $vgpr88, 0, 0, 0, 0, 0, implicit $exec + $vgpr24_vgpr25_vgpr26_vgpr27 = V_SWMMAC_F16_16X16X64_F16_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr24_vgpr25_vgpr26_vgpr27, killed $vgpr88, 0, 0, 0, 0, 0, implicit $exec + $vgpr64_vgpr65_vgpr66_vgpr67 = V_SWMMAC_F16_16X16X64_F16_w32_twoaddr 8, killed $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr64_vgpr65_vgpr66_vgpr67, killed $vgpr88, 0, 0, 0, 0, 0, implicit $exec +... + +--- +name: test_swmmac_f16_16x16x64_f16_D0_overlaps_B1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_swmmac_f16_16x16x64_f16_D0_overlaps_B1 + ; GFX1250: early-clobber $vgpr24_vgpr25_vgpr26_vgpr27 = V_SWMMAC_F16_16X16X64_F16_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr24_vgpr25_vgpr26_vgpr27, killed $vgpr88, 0, 0, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: early-clobber $vgpr64_vgpr65_vgpr66_vgpr67 = V_SWMMAC_F16_16X16X64_F16_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27, killed $vgpr64_vgpr65_vgpr66_vgpr67, killed $vgpr88, 0, 0, 0, 0, 0, implicit $exec + $vgpr24_vgpr25_vgpr26_vgpr27 = V_SWMMAC_F16_16X16X64_F16_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr24_vgpr25_vgpr26_vgpr27, killed $vgpr88, 0, 0, 0, 0, 0, implicit $exec + $vgpr64_vgpr65_vgpr66_vgpr67 = V_SWMMAC_F16_16X16X64_F16_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27, killed $vgpr64_vgpr65_vgpr66_vgpr67, killed $vgpr88, 0, 0, 0, 0, 0, implicit $exec +... 
+ +--- +name: test_swmmac_f16_16x16x64_f16_D0_overlaps_Index1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_swmmac_f16_16x16x64_f16_D0_overlaps_Index1 + ; GFX1250: early-clobber $vgpr24_vgpr25_vgpr26_vgpr27 = V_SWMMAC_F16_16X16X64_F16_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr24_vgpr25_vgpr26_vgpr27, killed $vgpr88, 0, 0, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: early-clobber $vgpr64_vgpr65_vgpr66_vgpr67 = V_SWMMAC_F16_16X16X64_F16_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr64_vgpr65_vgpr66_vgpr67, killed $vgpr24, 0, 0, 0, 0, 0, implicit $exec + $vgpr24_vgpr25_vgpr26_vgpr27 = V_SWMMAC_F16_16X16X64_F16_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr24_vgpr25_vgpr26_vgpr27, killed $vgpr88, 0, 0, 0, 0, 0, implicit $exec + $vgpr64_vgpr65_vgpr66_vgpr67 = V_SWMMAC_F16_16X16X64_F16_w32_twoaddr 8, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, 8, killed $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23, killed $vgpr64_vgpr65_vgpr66_vgpr67, killed $vgpr24, 0, 0, 0, 0, 0, implicit $exec +... diff --git a/llvm/test/CodeGen/DirectX/ShaderFlags/lifetimes-noint64op.ll b/llvm/test/CodeGen/DirectX/ShaderFlags/lifetimes-noint64op.ll deleted file mode 100644 index 736c86e..0000000 --- a/llvm/test/CodeGen/DirectX/ShaderFlags/lifetimes-noint64op.ll +++ /dev/null @@ -1,36 +0,0 @@ -; RUN: opt -S --passes="print-dx-shader-flags" 2>&1 %s | FileCheck %s -; RUN: llc %s --filetype=obj -o - | obj2yaml | FileCheck %s --check-prefix=DXC - -target triple = "dxil-pc-shadermodel6.7-library" - -; CHECK: ; Combined Shader Flags for Module -; CHECK-NEXT: ; Shader Flags Value: 0x00000000 -; CHECK-NEXT: ; -; CHECK-NOT: ; Note: shader requires additional functionality: -; CHECK-NOT: ; 64-Bit integer -; CHECK-NOT: ; Note: extra DXIL module flags: -; CHECK-NOT: ; -; CHECK-NEXT: ; Shader Flags for Module Functions -; CHECK-NEXT: ; Function lifetimes : 0x00000000 - -define void @lifetimes() #0 { - %a = alloca [4 x i32], align 8 - call void @llvm.lifetime.start.p0(i64 16, ptr nonnull %a) - call void @llvm.lifetime.end.p0(i64 16, ptr nonnull %a) - ret void -} - -; Function Attrs: nounwind memory(argmem: readwrite) -declare void @llvm.lifetime.start.p0(i64, ptr) #1 - -; Function Attrs: nounwind memory(argmem: readwrite) -declare void @llvm.lifetime.end.p0(i64, ptr) #1 - -attributes #0 = { convergent norecurse nounwind "hlsl.export"} -attributes #1 = { nounwind memory(argmem: readwrite) } - -; DXC: - Name: SFI0 -; DXC-NEXT: Size: 8 -; DXC-NOT: Flags: -; DXC-NOT: Int64Ops: true -; DXC: ... 
diff --git a/llvm/test/CodeGen/DirectX/UAddc.ll b/llvm/test/CodeGen/DirectX/UAddc.ll index 4b46b56..dd7aa23 100644 --- a/llvm/test/CodeGen/DirectX/UAddc.ll +++ b/llvm/test/CodeGen/DirectX/UAddc.ll @@ -35,14 +35,10 @@ define noundef <2 x i32> @test_UAddc_vec2(<2 x i32> noundef %a, <2 x i32> nounde ; CHECK-NEXT: [[UADDC_I1:%.*]] = call [[DX_TYPES_I32C]] @dx.op.binaryWithCarryOrBorrow.i32(i32 44, i32 [[A_I1]], i32 [[B_I1]]) #[[ATTR0]] ; CHECK-NEXT: [[CARRY_ELEM0:%.*]] = extractvalue [[DX_TYPES_I32C]] [[UADDC_I0]], 1 ; CHECK-NEXT: [[CARRY_ELEM1:%.*]] = extractvalue [[DX_TYPES_I32C]] [[UADDC_I1]], 1 -; CHECK-NEXT: [[CARRY_UPTO0:%.*]] = insertelement <2 x i1> poison, i1 [[CARRY_ELEM0]], i64 0 -; CHECK-NEXT: [[CARRY:%.*]] = insertelement <2 x i1> [[CARRY_UPTO0]], i1 [[CARRY_ELEM1]], i64 1 -; CHECK-NEXT: [[CARRY_I0:%.*]] = extractelement <2 x i1> [[CARRY]], i64 0 -; CHECK-NEXT: [[CARRY_I1:%.*]] = extractelement <2 x i1> [[CARRY]], i64 1 ; CHECK-NEXT: [[SUM_ELEM0:%.*]] = extractvalue [[DX_TYPES_I32C]] [[UADDC_I0]], 0 ; CHECK-NEXT: [[SUM_ELEM1:%.*]] = extractvalue [[DX_TYPES_I32C]] [[UADDC_I1]], 0 -; CHECK-NEXT: [[CARRY_ZEXT_I0:%.*]] = zext i1 [[CARRY_I0]] to i32 -; CHECK-NEXT: [[CARRY_ZEXT_I1:%.*]] = zext i1 [[CARRY_I1]] to i32 +; CHECK-NEXT: [[CARRY_ZEXT_I0:%.*]] = zext i1 [[CARRY_ELEM0]] to i32 +; CHECK-NEXT: [[CARRY_ZEXT_I1:%.*]] = zext i1 [[CARRY_ELEM1]] to i32 ; CHECK-NEXT: [[RESULT_I0:%.*]] = add i32 [[SUM_ELEM0]], [[CARRY_ZEXT_I0]] ; CHECK-NEXT: [[RESULT_I1:%.*]] = add i32 [[SUM_ELEM1]], [[CARRY_ZEXT_I1]] ; CHECK-NEXT: [[RESULT_UPTO0:%.*]] = insertelement <2 x i32> poison, i32 [[RESULT_I0]], i64 0 diff --git a/llvm/test/CodeGen/DirectX/legalize-lifetimes-valver-1.6.ll b/llvm/test/CodeGen/DirectX/legalize-lifetimes-valver-1.6.ll index f77df2d..6552ccd 100644 --- a/llvm/test/CodeGen/DirectX/legalize-lifetimes-valver-1.6.ll +++ b/llvm/test/CodeGen/DirectX/legalize-lifetimes-valver-1.6.ll @@ -1,6 +1,5 @@ ; RUN: opt -S -passes='dxil-op-lower' -mtriple=dxil-pc-shadermodel6.3-library %s | FileCheck %s --check-prefixes=CHECK,CHECK-SM63 ; RUN: opt -S -passes='dxil-op-lower' -mtriple=dxil-pc-shadermodel6.6-library %s | FileCheck %s --check-prefixes=CHECK,CHECK-SM66 -; RUN: opt -S -dxil-op-lower -dxil-prepare -mtriple=dxil-pc-shadermodel6.6-library %s | FileCheck %s --check-prefixes=CHECK,CHECK-PREPARE ; CHECK-LABEL: define void @test_legal_lifetime() { ; @@ -16,14 +15,6 @@ ; CHECK-SM66-NEXT: store i32 0, ptr [[GEP]], align 4 ; CHECK-SM66-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[ACCUM_I_FLAT]]) ; -; CHECK-PREPARE-NEXT: [[ACCUM_I_FLAT:%.*]] = alloca [1 x i32], align 4 -; CHECK-PREPARE-NEXT: [[GEP:%.*]] = getelementptr i32, ptr [[ACCUM_I_FLAT]], i32 0 -; CHECK-PREPARE-NEXT: [[BITCAST:%.*]] = bitcast ptr [[ACCUM_I_FLAT]] to ptr -; CHECK-PREPARE-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[BITCAST]]) -; CHECK-PREPARE-NEXT: store i32 0, ptr [[GEP]], align 4 -; CHECK-PREPARE-NEXT: [[BITCAST:%.*]] = bitcast ptr [[ACCUM_I_FLAT]] to ptr -; CHECK-PREPARE-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[BITCAST]]) -; ; CHECK-NEXT: ret void ; define void @test_legal_lifetime() { @@ -35,22 +26,6 @@ define void @test_legal_lifetime() { ret void } -; CHECK-PREPARE-DAG: attributes [[LIFETIME_ATTRS:#.*]] = { nounwind } - -; CHECK-PREPARE-DAG: ; Function Attrs: nounwind -; CHECK-PREPARE-DAG: declare void @llvm.lifetime.start.p0(i64, ptr) [[LIFETIME_ATTRS]] - -; CHECK-PREPARE-DAG: ; Function Attrs: nounwind -; CHECK-PREPARE-DAG: declare void @llvm.lifetime.end.p0(i64, ptr) 
[[LIFETIME_ATTRS]]
-
-; Function Attrs: nounwind memory(argmem: readwrite)
-declare void @llvm.lifetime.end.p0(i64, ptr) #0
-
-; Function Attrs: nounwind memory(argmem: readwrite)
-declare void @llvm.lifetime.start.p0(i64, ptr) #0
-
-attributes #0 = { nounwind memory(argmem: readwrite) }
-
 ; Set the validator version to 1.6
 !dx.valver = !{!0}
 !0 = !{i32 1, i32 6}
diff --git a/llvm/test/CodeGen/Hexagon/swp-load-to-store-forward.mir b/llvm/test/CodeGen/Hexagon/swp-load-to-store-forward.mir
new file mode 100644
index 0000000..2960343
--- /dev/null
+++ b/llvm/test/CodeGen/Hexagon/swp-load-to-store-forward.mir
@@ -0,0 +1,50 @@
+# RUN: llc -mtriple=hexagon -run-pass pipeliner %s -o /dev/null
+
+# Check that edges that violate topological order are not added to the
+# SwingSchedulerDAG. This test covers a crash caused by PR 145878.
+
+--- |
+  target triple = "hexagon"
+
+  define void @crash_145878() {
+  entry:
+    br label %loop
+
+  loop:                                             ; preds = %loop, %entry
+    %lsr.iv2 = phi i32 [ %lsr.iv.next, %loop ], [ 1, %entry ]
+    %lsr.iv = phi ptr [ %cgep3, %loop ], [ inttoptr (i32 -8 to ptr), %entry ]
+    %cgep = getelementptr i8, ptr %lsr.iv, i32 12
+    %load = load i32, ptr %cgep, align 4
+    store i32 %load, ptr %lsr.iv, align 4
+    %lsr.iv.next = add nsw i32 %lsr.iv2, -1
+    %iv.cmp.not = icmp eq i32 %lsr.iv.next, 0
+    %cgep3 = getelementptr i8, ptr %lsr.iv, i32 -8
+    br i1 %iv.cmp.not, label %exit, label %loop
+
+  exit:                                             ; preds = %loop
+    ret void
+  }
+...
+---
+name: crash_145878
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    successors: %bb.1(0x80000000)
+
+    %5:intregs = A2_tfrsi -8
+    J2_loop0i %bb.1, 1, implicit-def $lc0, implicit-def $sa0, implicit-def $usr
+
+  bb.1.loop (machine-block-address-taken):
+    successors: %bb.2(0x04000000), %bb.1(0x7c000000)
+
+    %1:intregs = PHI %5, %bb.0, %3, %bb.1
+    %6:intregs = L2_loadri_io %1, 12 :: (load (s32) from %ir.cgep)
+    S2_storeri_io %1, 0, killed %6 :: (store (s32) into %ir.lsr.iv)
+    %3:intregs = A2_addi %1, -8
+    ENDLOOP0 %bb.1, implicit-def $pc, implicit-def $lc0, implicit $sa0, implicit $lc0
+    J2_jump %bb.2, implicit-def dead $pc
+
+  bb.2.exit:
+    PS_jmpret $r31, implicit-def dead $pc
+...
diff --git a/llvm/test/CodeGen/LoongArch/lasx/build-vector.ll b/llvm/test/CodeGen/LoongArch/lasx/build-vector.ll index f25e988..61a915a 100644 --- a/llvm/test/CodeGen/LoongArch/lasx/build-vector.ll +++ b/llvm/test/CodeGen/LoongArch/lasx/build-vector.ll @@ -467,22 +467,21 @@ entry: define void @buildvector_v8f32(ptr %dst, float %a0, float %a1, float %a2, float %a3, float %a4, float %a5, float %a6, float %a7) nounwind { ; CHECK-LABEL: buildvector_v8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: movfr2gr.s $a1, $fa0 -; CHECK-NEXT: xvinsgr2vr.w $xr0, $a1, 0 -; CHECK-NEXT: movfr2gr.s $a1, $fa1 -; CHECK-NEXT: xvinsgr2vr.w $xr0, $a1, 1 -; CHECK-NEXT: movfr2gr.s $a1, $fa2 -; CHECK-NEXT: xvinsgr2vr.w $xr0, $a1, 2 -; CHECK-NEXT: movfr2gr.s $a1, $fa3 -; CHECK-NEXT: xvinsgr2vr.w $xr0, $a1, 3 -; CHECK-NEXT: movfr2gr.s $a1, $fa4 -; CHECK-NEXT: xvinsgr2vr.w $xr0, $a1, 4 -; CHECK-NEXT: movfr2gr.s $a1, $fa5 -; CHECK-NEXT: xvinsgr2vr.w $xr0, $a1, 5 -; CHECK-NEXT: movfr2gr.s $a1, $fa6 -; CHECK-NEXT: xvinsgr2vr.w $xr0, $a1, 6 -; CHECK-NEXT: movfr2gr.s $a1, $fa7 -; CHECK-NEXT: xvinsgr2vr.w $xr0, $a1, 7 +; CHECK-NEXT: # kill: def $f7 killed $f7 def $xr7 +; CHECK-NEXT: # kill: def $f6 killed $f6 def $xr6 +; CHECK-NEXT: # kill: def $f5 killed $f5 def $xr5 +; CHECK-NEXT: # kill: def $f4 killed $f4 def $xr4 +; CHECK-NEXT: # kill: def $f3 killed $f3 def $xr3 +; CHECK-NEXT: # kill: def $f2 killed $f2 def $xr2 +; CHECK-NEXT: # kill: def $f1 killed $f1 def $xr1 +; CHECK-NEXT: # kill: def $f0 killed $f0 def $xr0 +; CHECK-NEXT: xvinsve0.w $xr0, $xr1, 1 +; CHECK-NEXT: xvinsve0.w $xr0, $xr2, 2 +; CHECK-NEXT: xvinsve0.w $xr0, $xr3, 3 +; CHECK-NEXT: xvinsve0.w $xr0, $xr4, 4 +; CHECK-NEXT: xvinsve0.w $xr0, $xr5, 5 +; CHECK-NEXT: xvinsve0.w $xr0, $xr6, 6 +; CHECK-NEXT: xvinsve0.w $xr0, $xr7, 7 ; CHECK-NEXT: xvst $xr0, $a0, 0 ; CHECK-NEXT: ret entry: @@ -501,14 +500,13 @@ entry: define void @buildvector_v4f64(ptr %dst, double %a0, double %a1, double %a2, double %a3) nounwind { ; CHECK-LABEL: buildvector_v4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: movfr2gr.d $a1, $fa0 -; CHECK-NEXT: xvinsgr2vr.d $xr0, $a1, 0 -; CHECK-NEXT: movfr2gr.d $a1, $fa1 -; CHECK-NEXT: xvinsgr2vr.d $xr0, $a1, 1 -; CHECK-NEXT: movfr2gr.d $a1, $fa2 -; CHECK-NEXT: xvinsgr2vr.d $xr0, $a1, 2 -; CHECK-NEXT: movfr2gr.d $a1, $fa3 -; CHECK-NEXT: xvinsgr2vr.d $xr0, $a1, 3 +; CHECK-NEXT: # kill: def $f3_64 killed $f3_64 def $xr3 +; CHECK-NEXT: # kill: def $f2_64 killed $f2_64 def $xr2 +; CHECK-NEXT: # kill: def $f1_64 killed $f1_64 def $xr1 +; CHECK-NEXT: # kill: def $f0_64 killed $f0_64 def $xr0 +; CHECK-NEXT: xvinsve0.d $xr0, $xr1, 1 +; CHECK-NEXT: xvinsve0.d $xr0, $xr2, 2 +; CHECK-NEXT: xvinsve0.d $xr0, $xr3, 3 ; CHECK-NEXT: xvst $xr0, $a0, 0 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/LoongArch/lasx/fpowi.ll b/llvm/test/CodeGen/LoongArch/lasx/fpowi.ll index 9528280..3800712 100644 --- a/llvm/test/CodeGen/LoongArch/lasx/fpowi.ll +++ b/llvm/test/CodeGen/LoongArch/lasx/fpowi.ll @@ -11,23 +11,22 @@ define <8 x float> @powi_v8f32(<8 x float> %va, i32 %b) nounwind { ; CHECK-NEXT: st.d $fp, $sp, 80 # 8-byte Folded Spill ; CHECK-NEXT: xvst $xr0, $sp, 16 # 32-byte Folded Spill ; CHECK-NEXT: addi.w $fp, $a0, 0 -; CHECK-NEXT: xvpickve2gr.w $a0, $xr0, 0 +; CHECK-NEXT: xvpickve2gr.w $a0, $xr0, 1 ; CHECK-NEXT: movgr2fr.w $fa0, $a0 ; CHECK-NEXT: move $a0, $fp ; CHECK-NEXT: pcaddu18i $ra, %call36(__powisf2) ; CHECK-NEXT: jirl $ra, $ra, 0 -; CHECK-NEXT: movfr2gr.s $a0, $fa0 -; CHECK-NEXT: xvinsgr2vr.w $xr0, $a0, 0 +; CHECK-NEXT: # kill: def $f0 killed $f0 def 
$xr0 ; CHECK-NEXT: xvst $xr0, $sp, 48 # 32-byte Folded Spill ; CHECK-NEXT: xvld $xr0, $sp, 16 # 32-byte Folded Reload -; CHECK-NEXT: xvpickve2gr.w $a0, $xr0, 1 +; CHECK-NEXT: xvpickve2gr.w $a0, $xr0, 0 ; CHECK-NEXT: movgr2fr.w $fa0, $a0 ; CHECK-NEXT: move $a0, $fp ; CHECK-NEXT: pcaddu18i $ra, %call36(__powisf2) ; CHECK-NEXT: jirl $ra, $ra, 0 -; CHECK-NEXT: movfr2gr.s $a0, $fa0 -; CHECK-NEXT: xvld $xr0, $sp, 48 # 32-byte Folded Reload -; CHECK-NEXT: xvinsgr2vr.w $xr0, $a0, 1 +; CHECK-NEXT: # kill: def $f0 killed $f0 def $xr0 +; CHECK-NEXT: xvld $xr1, $sp, 48 # 32-byte Folded Reload +; CHECK-NEXT: xvinsve0.w $xr0, $xr1, 1 ; CHECK-NEXT: xvst $xr0, $sp, 48 # 32-byte Folded Spill ; CHECK-NEXT: xvld $xr0, $sp, 16 # 32-byte Folded Reload ; CHECK-NEXT: xvpickve2gr.w $a0, $xr0, 2 @@ -35,59 +34,60 @@ define <8 x float> @powi_v8f32(<8 x float> %va, i32 %b) nounwind { ; CHECK-NEXT: move $a0, $fp ; CHECK-NEXT: pcaddu18i $ra, %call36(__powisf2) ; CHECK-NEXT: jirl $ra, $ra, 0 -; CHECK-NEXT: movfr2gr.s $a0, $fa0 -; CHECK-NEXT: xvld $xr0, $sp, 48 # 32-byte Folded Reload -; CHECK-NEXT: xvinsgr2vr.w $xr0, $a0, 2 -; CHECK-NEXT: xvst $xr0, $sp, 48 # 32-byte Folded Spill +; CHECK-NEXT: # kill: def $f0 killed $f0 def $xr0 +; CHECK-NEXT: xvld $xr1, $sp, 48 # 32-byte Folded Reload +; CHECK-NEXT: xvinsve0.w $xr1, $xr0, 2 +; CHECK-NEXT: xvst $xr1, $sp, 48 # 32-byte Folded Spill ; CHECK-NEXT: xvld $xr0, $sp, 16 # 32-byte Folded Reload ; CHECK-NEXT: xvpickve2gr.w $a0, $xr0, 3 ; CHECK-NEXT: movgr2fr.w $fa0, $a0 ; CHECK-NEXT: move $a0, $fp ; CHECK-NEXT: pcaddu18i $ra, %call36(__powisf2) ; CHECK-NEXT: jirl $ra, $ra, 0 -; CHECK-NEXT: movfr2gr.s $a0, $fa0 -; CHECK-NEXT: xvld $xr0, $sp, 48 # 32-byte Folded Reload -; CHECK-NEXT: xvinsgr2vr.w $xr0, $a0, 3 -; CHECK-NEXT: xvst $xr0, $sp, 48 # 32-byte Folded Spill +; CHECK-NEXT: # kill: def $f0 killed $f0 def $xr0 +; CHECK-NEXT: xvld $xr1, $sp, 48 # 32-byte Folded Reload +; CHECK-NEXT: xvinsve0.w $xr1, $xr0, 3 +; CHECK-NEXT: xvst $xr1, $sp, 48 # 32-byte Folded Spill ; CHECK-NEXT: xvld $xr0, $sp, 16 # 32-byte Folded Reload ; CHECK-NEXT: xvpickve2gr.w $a0, $xr0, 4 ; CHECK-NEXT: movgr2fr.w $fa0, $a0 ; CHECK-NEXT: move $a0, $fp ; CHECK-NEXT: pcaddu18i $ra, %call36(__powisf2) ; CHECK-NEXT: jirl $ra, $ra, 0 -; CHECK-NEXT: movfr2gr.s $a0, $fa0 -; CHECK-NEXT: xvld $xr0, $sp, 48 # 32-byte Folded Reload -; CHECK-NEXT: xvinsgr2vr.w $xr0, $a0, 4 -; CHECK-NEXT: xvst $xr0, $sp, 48 # 32-byte Folded Spill +; CHECK-NEXT: # kill: def $f0 killed $f0 def $xr0 +; CHECK-NEXT: xvld $xr1, $sp, 48 # 32-byte Folded Reload +; CHECK-NEXT: xvinsve0.w $xr1, $xr0, 4 +; CHECK-NEXT: xvst $xr1, $sp, 48 # 32-byte Folded Spill ; CHECK-NEXT: xvld $xr0, $sp, 16 # 32-byte Folded Reload ; CHECK-NEXT: xvpickve2gr.w $a0, $xr0, 5 ; CHECK-NEXT: movgr2fr.w $fa0, $a0 ; CHECK-NEXT: move $a0, $fp ; CHECK-NEXT: pcaddu18i $ra, %call36(__powisf2) ; CHECK-NEXT: jirl $ra, $ra, 0 -; CHECK-NEXT: movfr2gr.s $a0, $fa0 -; CHECK-NEXT: xvld $xr0, $sp, 48 # 32-byte Folded Reload -; CHECK-NEXT: xvinsgr2vr.w $xr0, $a0, 5 -; CHECK-NEXT: xvst $xr0, $sp, 48 # 32-byte Folded Spill +; CHECK-NEXT: # kill: def $f0 killed $f0 def $xr0 +; CHECK-NEXT: xvld $xr1, $sp, 48 # 32-byte Folded Reload +; CHECK-NEXT: xvinsve0.w $xr1, $xr0, 5 +; CHECK-NEXT: xvst $xr1, $sp, 48 # 32-byte Folded Spill ; CHECK-NEXT: xvld $xr0, $sp, 16 # 32-byte Folded Reload ; CHECK-NEXT: xvpickve2gr.w $a0, $xr0, 6 ; CHECK-NEXT: movgr2fr.w $fa0, $a0 ; CHECK-NEXT: move $a0, $fp ; CHECK-NEXT: pcaddu18i $ra, %call36(__powisf2) ; CHECK-NEXT: jirl $ra, $ra, 0 -; CHECK-NEXT: movfr2gr.s 
$a0, $fa0 -; CHECK-NEXT: xvld $xr0, $sp, 48 # 32-byte Folded Reload -; CHECK-NEXT: xvinsgr2vr.w $xr0, $a0, 6 -; CHECK-NEXT: xvst $xr0, $sp, 48 # 32-byte Folded Spill +; CHECK-NEXT: # kill: def $f0 killed $f0 def $xr0 +; CHECK-NEXT: xvld $xr1, $sp, 48 # 32-byte Folded Reload +; CHECK-NEXT: xvinsve0.w $xr1, $xr0, 6 +; CHECK-NEXT: xvst $xr1, $sp, 48 # 32-byte Folded Spill ; CHECK-NEXT: xvld $xr0, $sp, 16 # 32-byte Folded Reload ; CHECK-NEXT: xvpickve2gr.w $a0, $xr0, 7 ; CHECK-NEXT: movgr2fr.w $fa0, $a0 ; CHECK-NEXT: move $a0, $fp ; CHECK-NEXT: pcaddu18i $ra, %call36(__powisf2) ; CHECK-NEXT: jirl $ra, $ra, 0 -; CHECK-NEXT: movfr2gr.s $a0, $fa0 -; CHECK-NEXT: xvld $xr0, $sp, 48 # 32-byte Folded Reload -; CHECK-NEXT: xvinsgr2vr.w $xr0, $a0, 7 +; CHECK-NEXT: # kill: def $f0 killed $f0 def $xr0 +; CHECK-NEXT: xvld $xr1, $sp, 48 # 32-byte Folded Reload +; CHECK-NEXT: xvinsve0.w $xr1, $xr0, 7 +; CHECK-NEXT: xvori.b $xr0, $xr1, 0 ; CHECK-NEXT: ld.d $fp, $sp, 80 # 8-byte Folded Reload ; CHECK-NEXT: ld.d $ra, $sp, 88 # 8-byte Folded Reload ; CHECK-NEXT: addi.d $sp, $sp, 96 @@ -105,45 +105,45 @@ define <4 x double> @powi_v4f64(<4 x double> %va, i32 %b) nounwind { ; CHECK-NEXT: addi.d $sp, $sp, -96 ; CHECK-NEXT: st.d $ra, $sp, 88 # 8-byte Folded Spill ; CHECK-NEXT: st.d $fp, $sp, 80 # 8-byte Folded Spill -; CHECK-NEXT: xvst $xr0, $sp, 16 # 32-byte Folded Spill +; CHECK-NEXT: xvst $xr0, $sp, 48 # 32-byte Folded Spill ; CHECK-NEXT: addi.w $fp, $a0, 0 -; CHECK-NEXT: xvpickve2gr.d $a0, $xr0, 0 +; CHECK-NEXT: xvpickve2gr.d $a0, $xr0, 1 ; CHECK-NEXT: movgr2fr.d $fa0, $a0 ; CHECK-NEXT: move $a0, $fp ; CHECK-NEXT: pcaddu18i $ra, %call36(__powidf2) ; CHECK-NEXT: jirl $ra, $ra, 0 -; CHECK-NEXT: movfr2gr.d $a0, $fa0 -; CHECK-NEXT: xvinsgr2vr.d $xr0, $a0, 0 -; CHECK-NEXT: xvst $xr0, $sp, 48 # 32-byte Folded Spill -; CHECK-NEXT: xvld $xr0, $sp, 16 # 32-byte Folded Reload -; CHECK-NEXT: xvpickve2gr.d $a0, $xr0, 1 +; CHECK-NEXT: # kill: def $f0_64 killed $f0_64 def $xr0 +; CHECK-NEXT: xvst $xr0, $sp, 16 # 32-byte Folded Spill +; CHECK-NEXT: xvld $xr0, $sp, 48 # 32-byte Folded Reload +; CHECK-NEXT: xvpickve2gr.d $a0, $xr0, 0 ; CHECK-NEXT: movgr2fr.d $fa0, $a0 ; CHECK-NEXT: move $a0, $fp ; CHECK-NEXT: pcaddu18i $ra, %call36(__powidf2) ; CHECK-NEXT: jirl $ra, $ra, 0 -; CHECK-NEXT: movfr2gr.d $a0, $fa0 +; CHECK-NEXT: # kill: def $f0_64 killed $f0_64 def $xr0 +; CHECK-NEXT: xvld $xr1, $sp, 16 # 32-byte Folded Reload +; CHECK-NEXT: xvinsve0.d $xr0, $xr1, 1 +; CHECK-NEXT: xvst $xr0, $sp, 16 # 32-byte Folded Spill ; CHECK-NEXT: xvld $xr0, $sp, 48 # 32-byte Folded Reload -; CHECK-NEXT: xvinsgr2vr.d $xr0, $a0, 1 -; CHECK-NEXT: xvst $xr0, $sp, 48 # 32-byte Folded Spill -; CHECK-NEXT: xvld $xr0, $sp, 16 # 32-byte Folded Reload ; CHECK-NEXT: xvpickve2gr.d $a0, $xr0, 2 ; CHECK-NEXT: movgr2fr.d $fa0, $a0 ; CHECK-NEXT: move $a0, $fp ; CHECK-NEXT: pcaddu18i $ra, %call36(__powidf2) ; CHECK-NEXT: jirl $ra, $ra, 0 -; CHECK-NEXT: movfr2gr.d $a0, $fa0 +; CHECK-NEXT: # kill: def $f0_64 killed $f0_64 def $xr0 +; CHECK-NEXT: xvld $xr1, $sp, 16 # 32-byte Folded Reload +; CHECK-NEXT: xvinsve0.d $xr1, $xr0, 2 +; CHECK-NEXT: xvst $xr1, $sp, 16 # 32-byte Folded Spill ; CHECK-NEXT: xvld $xr0, $sp, 48 # 32-byte Folded Reload -; CHECK-NEXT: xvinsgr2vr.d $xr0, $a0, 2 -; CHECK-NEXT: xvst $xr0, $sp, 48 # 32-byte Folded Spill -; CHECK-NEXT: xvld $xr0, $sp, 16 # 32-byte Folded Reload ; CHECK-NEXT: xvpickve2gr.d $a0, $xr0, 3 ; CHECK-NEXT: movgr2fr.d $fa0, $a0 ; CHECK-NEXT: move $a0, $fp ; CHECK-NEXT: pcaddu18i $ra, %call36(__powidf2) ; CHECK-NEXT: jirl $ra, 
$ra, 0 -; CHECK-NEXT: movfr2gr.d $a0, $fa0 -; CHECK-NEXT: xvld $xr0, $sp, 48 # 32-byte Folded Reload -; CHECK-NEXT: xvinsgr2vr.d $xr0, $a0, 3 +; CHECK-NEXT: # kill: def $f0_64 killed $f0_64 def $xr0 +; CHECK-NEXT: xvld $xr1, $sp, 16 # 32-byte Folded Reload +; CHECK-NEXT: xvinsve0.d $xr1, $xr0, 3 +; CHECK-NEXT: xvori.b $xr0, $xr1, 0 ; CHECK-NEXT: ld.d $fp, $sp, 80 # 8-byte Folded Reload ; CHECK-NEXT: ld.d $ra, $sp, 88 # 8-byte Folded Reload ; CHECK-NEXT: addi.d $sp, $sp, 96 diff --git a/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/fix-xvshuf.ll b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/fix-xvshuf.ll index f154dd3..221aba3 100644 --- a/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/fix-xvshuf.ll +++ b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/fix-xvshuf.ll @@ -6,15 +6,12 @@ define <4 x double> @shufflevector_v4f64(<4 x double> %a, <4 x double> %b) { ; CHECK-LABEL: shufflevector_v4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: xvpickve2gr.d $a0, $xr0, 0 -; CHECK-NEXT: xvinsgr2vr.d $xr2, $a0, 0 ; CHECK-NEXT: xvpickve2gr.d $a0, $xr1, 2 -; CHECK-NEXT: xvinsgr2vr.d $xr2, $a0, 1 -; CHECK-NEXT: xvpickve2gr.d $a0, $xr0, 3 -; CHECK-NEXT: xvinsgr2vr.d $xr2, $a0, 2 +; CHECK-NEXT: xvpickve2gr.d $a1, $xr0, 3 +; CHECK-NEXT: xvinsgr2vr.d $xr0, $a0, 1 +; CHECK-NEXT: xvinsgr2vr.d $xr0, $a1, 2 ; CHECK-NEXT: xvpickve2gr.d $a0, $xr1, 3 -; CHECK-NEXT: xvinsgr2vr.d $xr2, $a0, 3 -; CHECK-NEXT: xvori.b $xr0, $xr2, 0 +; CHECK-NEXT: xvinsgr2vr.d $xr0, $a0, 3 ; CHECK-NEXT: ret entry: %c = shufflevector <4 x double> %a, <4 x double> %b, <4 x i32> <i32 0, i32 6, i32 3, i32 7> diff --git a/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/insertelement.ll b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/insertelement.ll index b24f95e..c1d4220 100644 --- a/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/insertelement.ll +++ b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/insertelement.ll @@ -87,8 +87,8 @@ define void @insert_8xfloat(ptr %src, ptr %dst, float %in) nounwind { ; CHECK-LABEL: insert_8xfloat: ; CHECK: # %bb.0: ; CHECK-NEXT: xvld $xr1, $a0, 0 -; CHECK-NEXT: movfr2gr.s $a0, $fa0 -; CHECK-NEXT: xvinsgr2vr.w $xr1, $a0, 1 +; CHECK-NEXT: # kill: def $f0 killed $f0 def $xr0 +; CHECK-NEXT: xvinsve0.w $xr1, $xr0, 1 ; CHECK-NEXT: xvst $xr1, $a1, 0 ; CHECK-NEXT: ret %v = load volatile <8 x float>, ptr %src @@ -101,8 +101,8 @@ define void @insert_4xdouble(ptr %src, ptr %dst, double %in) nounwind { ; CHECK-LABEL: insert_4xdouble: ; CHECK: # %bb.0: ; CHECK-NEXT: xvld $xr1, $a0, 0 -; CHECK-NEXT: movfr2gr.d $a0, $fa0 -; CHECK-NEXT: xvinsgr2vr.d $xr1, $a0, 1 +; CHECK-NEXT: # kill: def $f0_64 killed $f0_64 def $xr0 +; CHECK-NEXT: xvinsve0.d $xr1, $xr0, 1 ; CHECK-NEXT: xvst $xr1, $a1, 0 ; CHECK-NEXT: ret %v = load volatile <4 x double>, ptr %src diff --git a/llvm/test/CodeGen/LoongArch/llvm.exp10.ll b/llvm/test/CodeGen/LoongArch/llvm.exp10.ll index 7a52531..62ea5cb 100644 --- a/llvm/test/CodeGen/LoongArch/llvm.exp10.ll +++ b/llvm/test/CodeGen/LoongArch/llvm.exp10.ll @@ -196,21 +196,20 @@ define <2 x double> @exp10_v2f64(<2 x double> %x) #0 { ; LA64-NEXT: addi.d $sp, $sp, -48 ; LA64-NEXT: st.d $ra, $sp, 40 # 8-byte Folded Spill ; LA64-NEXT: vst $vr0, $sp, 0 # 16-byte Folded Spill -; LA64-NEXT: vreplvei.d $vr0, $vr0, 0 +; LA64-NEXT: vreplvei.d $vr0, $vr0, 1 ; LA64-NEXT: # kill: def $f0_64 killed $f0_64 killed $vr0 ; LA64-NEXT: pcaddu18i $ra, %call36(exp10) ; LA64-NEXT: jirl $ra, $ra, 0 -; LA64-NEXT: movfr2gr.d $a0, $fa0 -; LA64-NEXT: vinsgr2vr.d $vr0, $a0, 0 +; LA64-NEXT: # kill: def $f0_64 killed $f0_64 def 
$vr0 ; LA64-NEXT: vst $vr0, $sp, 16 # 16-byte Folded Spill ; LA64-NEXT: vld $vr0, $sp, 0 # 16-byte Folded Reload -; LA64-NEXT: vreplvei.d $vr0, $vr0, 1 +; LA64-NEXT: vreplvei.d $vr0, $vr0, 0 ; LA64-NEXT: # kill: def $f0_64 killed $f0_64 killed $vr0 ; LA64-NEXT: pcaddu18i $ra, %call36(exp10) ; LA64-NEXT: jirl $ra, $ra, 0 -; LA64-NEXT: movfr2gr.d $a0, $fa0 -; LA64-NEXT: vld $vr0, $sp, 16 # 16-byte Folded Reload -; LA64-NEXT: vinsgr2vr.d $vr0, $a0, 1 +; LA64-NEXT: # kill: def $f0_64 killed $f0_64 def $vr0 +; LA64-NEXT: vld $vr1, $sp, 16 # 16-byte Folded Reload +; LA64-NEXT: vextrins.d $vr0, $vr1, 16 ; LA64-NEXT: ld.d $ra, $sp, 40 # 8-byte Folded Reload ; LA64-NEXT: addi.d $sp, $sp, 48 ; LA64-NEXT: ret diff --git a/llvm/test/CodeGen/LoongArch/llvm.sincos.ll b/llvm/test/CodeGen/LoongArch/llvm.sincos.ll index 648c19d..383d63c 100644 --- a/llvm/test/CodeGen/LoongArch/llvm.sincos.ll +++ b/llvm/test/CodeGen/LoongArch/llvm.sincos.ll @@ -571,39 +571,37 @@ define { <2 x double>, <2 x double> } @test_sincos_v2f64(<2 x double> %a) #0 { ; LA64-NEXT: addi.d $sp, $sp, -80 ; LA64-NEXT: st.d $ra, $sp, 72 # 8-byte Folded Spill ; LA64-NEXT: vst $vr0, $sp, 16 # 16-byte Folded Spill -; LA64-NEXT: vreplvei.d $vr0, $vr0, 0 -; LA64-NEXT: vst $vr0, $sp, 32 # 16-byte Folded Spill +; LA64-NEXT: vreplvei.d $vr0, $vr0, 1 +; LA64-NEXT: vst $vr0, $sp, 48 # 16-byte Folded Spill ; LA64-NEXT: # kill: def $f0_64 killed $f0_64 killed $vr0 ; LA64-NEXT: pcaddu18i $ra, %call36(sin) ; LA64-NEXT: jirl $ra, $ra, 0 -; LA64-NEXT: movfr2gr.d $a0, $fa0 -; LA64-NEXT: vinsgr2vr.d $vr0, $a0, 0 -; LA64-NEXT: vst $vr0, $sp, 48 # 16-byte Folded Spill +; LA64-NEXT: # kill: def $f0_64 killed $f0_64 def $vr0 +; LA64-NEXT: vst $vr0, $sp, 32 # 16-byte Folded Spill ; LA64-NEXT: vld $vr0, $sp, 16 # 16-byte Folded Reload -; LA64-NEXT: vreplvei.d $vr0, $vr0, 1 +; LA64-NEXT: vreplvei.d $vr0, $vr0, 0 ; LA64-NEXT: vst $vr0, $sp, 16 # 16-byte Folded Spill ; LA64-NEXT: # kill: def $f0_64 killed $f0_64 killed $vr0 ; LA64-NEXT: pcaddu18i $ra, %call36(sin) ; LA64-NEXT: jirl $ra, $ra, 0 -; LA64-NEXT: movfr2gr.d $a0, $fa0 +; LA64-NEXT: # kill: def $f0_64 killed $f0_64 def $vr0 +; LA64-NEXT: vld $vr1, $sp, 32 # 16-byte Folded Reload +; LA64-NEXT: vextrins.d $vr0, $vr1, 16 +; LA64-NEXT: vst $vr0, $sp, 32 # 16-byte Folded Spill ; LA64-NEXT: vld $vr0, $sp, 48 # 16-byte Folded Reload -; LA64-NEXT: vinsgr2vr.d $vr0, $a0, 1 -; LA64-NEXT: vst $vr0, $sp, 48 # 16-byte Folded Spill -; LA64-NEXT: vld $vr0, $sp, 32 # 16-byte Folded Reload ; LA64-NEXT: # kill: def $f0_64 killed $f0_64 killed $vr0 ; LA64-NEXT: pcaddu18i $ra, %call36(cos) ; LA64-NEXT: jirl $ra, $ra, 0 -; LA64-NEXT: movfr2gr.d $a0, $fa0 -; LA64-NEXT: vinsgr2vr.d $vr0, $a0, 0 -; LA64-NEXT: vst $vr0, $sp, 32 # 16-byte Folded Spill +; LA64-NEXT: # kill: def $f0_64 killed $f0_64 def $vr0 +; LA64-NEXT: vst $vr0, $sp, 48 # 16-byte Folded Spill ; LA64-NEXT: vld $vr0, $sp, 16 # 16-byte Folded Reload ; LA64-NEXT: # kill: def $f0_64 killed $f0_64 killed $vr0 ; LA64-NEXT: pcaddu18i $ra, %call36(cos) ; LA64-NEXT: jirl $ra, $ra, 0 -; LA64-NEXT: movfr2gr.d $a0, $fa0 -; LA64-NEXT: vld $vr1, $sp, 32 # 16-byte Folded Reload -; LA64-NEXT: vinsgr2vr.d $vr1, $a0, 1 +; LA64-NEXT: fmov.d $fa1, $fa0 ; LA64-NEXT: vld $vr0, $sp, 48 # 16-byte Folded Reload +; LA64-NEXT: vextrins.d $vr1, $vr0, 16 +; LA64-NEXT: vld $vr0, $sp, 32 # 16-byte Folded Reload ; LA64-NEXT: ld.d $ra, $sp, 72 # 8-byte Folded Reload ; LA64-NEXT: addi.d $sp, $sp, 80 ; LA64-NEXT: ret diff --git a/llvm/test/CodeGen/LoongArch/lsx/build-vector.ll 
b/llvm/test/CodeGen/LoongArch/lsx/build-vector.ll index d84e408..afc87d1 100644 --- a/llvm/test/CodeGen/LoongArch/lsx/build-vector.ll +++ b/llvm/test/CodeGen/LoongArch/lsx/build-vector.ll @@ -334,14 +334,13 @@ entry: define void @buildvector_v4f32(ptr %dst, float %a0, float %a1, float %a2, float %a3) nounwind { ; CHECK-LABEL: buildvector_v4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: movfr2gr.s $a1, $fa0 -; CHECK-NEXT: vinsgr2vr.w $vr0, $a1, 0 -; CHECK-NEXT: movfr2gr.s $a1, $fa1 -; CHECK-NEXT: vinsgr2vr.w $vr0, $a1, 1 -; CHECK-NEXT: movfr2gr.s $a1, $fa2 -; CHECK-NEXT: vinsgr2vr.w $vr0, $a1, 2 -; CHECK-NEXT: movfr2gr.s $a1, $fa3 -; CHECK-NEXT: vinsgr2vr.w $vr0, $a1, 3 +; CHECK-NEXT: # kill: def $f3 killed $f3 def $vr3 +; CHECK-NEXT: # kill: def $f2 killed $f2 def $vr2 +; CHECK-NEXT: # kill: def $f1 killed $f1 def $vr1 +; CHECK-NEXT: # kill: def $f0 killed $f0 def $vr0 +; CHECK-NEXT: vextrins.w $vr0, $vr1, 16 +; CHECK-NEXT: vextrins.w $vr0, $vr2, 32 +; CHECK-NEXT: vextrins.w $vr0, $vr3, 48 ; CHECK-NEXT: vst $vr0, $a0, 0 ; CHECK-NEXT: ret entry: @@ -356,10 +355,9 @@ entry: define void @buildvector_v2f64(ptr %dst, double %a0, double %a1) nounwind { ; CHECK-LABEL: buildvector_v2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: movfr2gr.d $a1, $fa0 -; CHECK-NEXT: vinsgr2vr.d $vr0, $a1, 0 -; CHECK-NEXT: movfr2gr.d $a1, $fa1 -; CHECK-NEXT: vinsgr2vr.d $vr0, $a1, 1 +; CHECK-NEXT: # kill: def $f1_64 killed $f1_64 def $vr1 +; CHECK-NEXT: # kill: def $f0_64 killed $f0_64 def $vr0 +; CHECK-NEXT: vextrins.d $vr0, $vr1, 16 ; CHECK-NEXT: vst $vr0, $a0, 0 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/LoongArch/lsx/fpowi.ll b/llvm/test/CodeGen/LoongArch/lsx/fpowi.ll index aafef07..735dad4 100644 --- a/llvm/test/CodeGen/LoongArch/lsx/fpowi.ll +++ b/llvm/test/CodeGen/LoongArch/lsx/fpowi.ll @@ -9,45 +9,45 @@ define <4 x float> @powi_v4f32(<4 x float> %va, i32 %b) nounwind { ; CHECK-NEXT: addi.d $sp, $sp, -48 ; CHECK-NEXT: st.d $ra, $sp, 40 # 8-byte Folded Spill ; CHECK-NEXT: st.d $fp, $sp, 32 # 8-byte Folded Spill -; CHECK-NEXT: vst $vr0, $sp, 0 # 16-byte Folded Spill +; CHECK-NEXT: vst $vr0, $sp, 16 # 16-byte Folded Spill ; CHECK-NEXT: addi.w $fp, $a0, 0 -; CHECK-NEXT: vreplvei.w $vr0, $vr0, 0 +; CHECK-NEXT: vreplvei.w $vr0, $vr0, 1 ; CHECK-NEXT: # kill: def $f0 killed $f0 killed $vr0 ; CHECK-NEXT: move $a0, $fp ; CHECK-NEXT: pcaddu18i $ra, %call36(__powisf2) ; CHECK-NEXT: jirl $ra, $ra, 0 -; CHECK-NEXT: movfr2gr.s $a0, $fa0 -; CHECK-NEXT: vinsgr2vr.w $vr0, $a0, 0 -; CHECK-NEXT: vst $vr0, $sp, 16 # 16-byte Folded Spill -; CHECK-NEXT: vld $vr0, $sp, 0 # 16-byte Folded Reload -; CHECK-NEXT: vreplvei.w $vr0, $vr0, 1 +; CHECK-NEXT: # kill: def $f0 killed $f0 def $vr0 +; CHECK-NEXT: vst $vr0, $sp, 0 # 16-byte Folded Spill +; CHECK-NEXT: vld $vr0, $sp, 16 # 16-byte Folded Reload +; CHECK-NEXT: vreplvei.w $vr0, $vr0, 0 ; CHECK-NEXT: # kill: def $f0 killed $f0 killed $vr0 ; CHECK-NEXT: move $a0, $fp ; CHECK-NEXT: pcaddu18i $ra, %call36(__powisf2) ; CHECK-NEXT: jirl $ra, $ra, 0 -; CHECK-NEXT: movfr2gr.s $a0, $fa0 +; CHECK-NEXT: # kill: def $f0 killed $f0 def $vr0 +; CHECK-NEXT: vld $vr1, $sp, 0 # 16-byte Folded Reload +; CHECK-NEXT: vextrins.w $vr0, $vr1, 16 +; CHECK-NEXT: vst $vr0, $sp, 0 # 16-byte Folded Spill ; CHECK-NEXT: vld $vr0, $sp, 16 # 16-byte Folded Reload -; CHECK-NEXT: vinsgr2vr.w $vr0, $a0, 1 -; CHECK-NEXT: vst $vr0, $sp, 16 # 16-byte Folded Spill -; CHECK-NEXT: vld $vr0, $sp, 0 # 16-byte Folded Reload ; CHECK-NEXT: vreplvei.w $vr0, $vr0, 2 ; CHECK-NEXT: # kill: def $f0 killed $f0 killed $vr0 ; 
CHECK-NEXT: move $a0, $fp ; CHECK-NEXT: pcaddu18i $ra, %call36(__powisf2) ; CHECK-NEXT: jirl $ra, $ra, 0 -; CHECK-NEXT: movfr2gr.s $a0, $fa0 +; CHECK-NEXT: # kill: def $f0 killed $f0 def $vr0 +; CHECK-NEXT: vld $vr1, $sp, 0 # 16-byte Folded Reload +; CHECK-NEXT: vextrins.w $vr1, $vr0, 32 +; CHECK-NEXT: vst $vr1, $sp, 0 # 16-byte Folded Spill ; CHECK-NEXT: vld $vr0, $sp, 16 # 16-byte Folded Reload -; CHECK-NEXT: vinsgr2vr.w $vr0, $a0, 2 -; CHECK-NEXT: vst $vr0, $sp, 16 # 16-byte Folded Spill -; CHECK-NEXT: vld $vr0, $sp, 0 # 16-byte Folded Reload ; CHECK-NEXT: vreplvei.w $vr0, $vr0, 3 ; CHECK-NEXT: # kill: def $f0 killed $f0 killed $vr0 ; CHECK-NEXT: move $a0, $fp ; CHECK-NEXT: pcaddu18i $ra, %call36(__powisf2) ; CHECK-NEXT: jirl $ra, $ra, 0 -; CHECK-NEXT: movfr2gr.s $a0, $fa0 -; CHECK-NEXT: vld $vr0, $sp, 16 # 16-byte Folded Reload -; CHECK-NEXT: vinsgr2vr.w $vr0, $a0, 3 +; CHECK-NEXT: # kill: def $f0 killed $f0 def $vr0 +; CHECK-NEXT: vld $vr1, $sp, 0 # 16-byte Folded Reload +; CHECK-NEXT: vextrins.w $vr1, $vr0, 48 +; CHECK-NEXT: vori.b $vr0, $vr1, 0 ; CHECK-NEXT: ld.d $fp, $sp, 32 # 8-byte Folded Reload ; CHECK-NEXT: ld.d $ra, $sp, 40 # 8-byte Folded Reload ; CHECK-NEXT: addi.d $sp, $sp, 48 @@ -67,23 +67,22 @@ define <2 x double> @powi_v2f64(<2 x double> %va, i32 %b) nounwind { ; CHECK-NEXT: st.d $fp, $sp, 32 # 8-byte Folded Spill ; CHECK-NEXT: vst $vr0, $sp, 0 # 16-byte Folded Spill ; CHECK-NEXT: addi.w $fp, $a0, 0 -; CHECK-NEXT: vreplvei.d $vr0, $vr0, 0 +; CHECK-NEXT: vreplvei.d $vr0, $vr0, 1 ; CHECK-NEXT: # kill: def $f0_64 killed $f0_64 killed $vr0 ; CHECK-NEXT: move $a0, $fp ; CHECK-NEXT: pcaddu18i $ra, %call36(__powidf2) ; CHECK-NEXT: jirl $ra, $ra, 0 -; CHECK-NEXT: movfr2gr.d $a0, $fa0 -; CHECK-NEXT: vinsgr2vr.d $vr0, $a0, 0 +; CHECK-NEXT: # kill: def $f0_64 killed $f0_64 def $vr0 ; CHECK-NEXT: vst $vr0, $sp, 16 # 16-byte Folded Spill ; CHECK-NEXT: vld $vr0, $sp, 0 # 16-byte Folded Reload -; CHECK-NEXT: vreplvei.d $vr0, $vr0, 1 +; CHECK-NEXT: vreplvei.d $vr0, $vr0, 0 ; CHECK-NEXT: # kill: def $f0_64 killed $f0_64 killed $vr0 ; CHECK-NEXT: move $a0, $fp ; CHECK-NEXT: pcaddu18i $ra, %call36(__powidf2) ; CHECK-NEXT: jirl $ra, $ra, 0 -; CHECK-NEXT: movfr2gr.d $a0, $fa0 -; CHECK-NEXT: vld $vr0, $sp, 16 # 16-byte Folded Reload -; CHECK-NEXT: vinsgr2vr.d $vr0, $a0, 1 +; CHECK-NEXT: # kill: def $f0_64 killed $f0_64 def $vr0 +; CHECK-NEXT: vld $vr1, $sp, 16 # 16-byte Folded Reload +; CHECK-NEXT: vextrins.d $vr0, $vr1, 16 ; CHECK-NEXT: ld.d $fp, $sp, 32 # 8-byte Folded Reload ; CHECK-NEXT: ld.d $ra, $sp, 40 # 8-byte Folded Reload ; CHECK-NEXT: addi.d $sp, $sp, 48 diff --git a/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/insertelement.ll b/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/insertelement.ll index 7f23207..c73252b 100644 --- a/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/insertelement.ll +++ b/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/insertelement.ll @@ -57,8 +57,8 @@ define void @insert_4xfloat(ptr %src, ptr %dst, float %ins) nounwind { ; CHECK-LABEL: insert_4xfloat: ; CHECK: # %bb.0: ; CHECK-NEXT: vld $vr1, $a0, 0 -; CHECK-NEXT: movfr2gr.s $a0, $fa0 -; CHECK-NEXT: vinsgr2vr.w $vr1, $a0, 1 +; CHECK-NEXT: # kill: def $f0 killed $f0 def $vr0 +; CHECK-NEXT: vextrins.w $vr1, $vr0, 16 ; CHECK-NEXT: vst $vr1, $a1, 0 ; CHECK-NEXT: ret %v = load volatile <4 x float>, ptr %src @@ -71,8 +71,8 @@ define void @insert_2xdouble(ptr %src, ptr %dst, double %ins) nounwind { ; CHECK-LABEL: insert_2xdouble: ; CHECK: # %bb.0: ; CHECK-NEXT: vld $vr1, $a0, 0 -; CHECK-NEXT: movfr2gr.d $a0, 
$fa0 -; CHECK-NEXT: vinsgr2vr.d $vr1, $a0, 1 +; CHECK-NEXT: # kill: def $f0_64 killed $f0_64 def $vr0 +; CHECK-NEXT: vextrins.d $vr1, $vr0, 16 ; CHECK-NEXT: vst $vr1, $a1, 0 ; CHECK-NEXT: ret %v = load volatile <2 x double>, ptr %src diff --git a/llvm/test/CodeGen/LoongArch/target-abi-from-triple-edge-cases.ll b/llvm/test/CodeGen/LoongArch/target-abi-from-triple-edge-cases.ll index eb656ad..6e9d26a 100644 --- a/llvm/test/CodeGen/LoongArch/target-abi-from-triple-edge-cases.ll +++ b/llvm/test/CodeGen/LoongArch/target-abi-from-triple-edge-cases.ll @@ -24,9 +24,9 @@ ; NO-WARNING-NOT: warning: triple-implied ABI conflicts with provided target-abi 'lp64d', using target-abi ;; Check that ILP32-on-LA64 and LP64-on-LA32 combinations are handled properly. -; RUN: llc --mtriple=loongarch64 --target-abi=ilp32d --mattr=+d < %s 2>&1 \ +; RUN: llc --mtriple=loongarch64-linux-gnu --target-abi=ilp32d --mattr=+d < %s 2>&1 \ ; RUN: | FileCheck %s --check-prefixes=LP64D,32ON64 -; RUN: llc --mtriple=loongarch32 --target-abi=lp64d --mattr=+d < %s 2>&1 \ +; RUN: llc --mtriple=loongarch32-linux-gnu --target-abi=lp64d --mattr=+d < %s 2>&1 \ ; RUN: | FileCheck %s --check-prefixes=ILP32D,64ON32 ; 32ON64: warning: 32-bit ABIs are not supported for 64-bit targets, ignoring and using triple-implied ABI @@ -49,12 +49,6 @@ ; LP64D-LP64F-NOF: warning: both target-abi and the triple-implied ABI are invalid, ignoring and using feature-implied ABI -;; Check that triple-implied ABI are invalid, use feature-implied ABI -; RUN: llc --mtriple=loongarch64 --mattr=-f < %s 2>&1 \ -; RUN: | FileCheck %s --check-prefixes=LP64S,LP64D-NONE-NOF - -; LP64D-NONE-NOF: warning: the triple-implied ABI is invalid, ignoring and using feature-implied ABI - define float @f(float %a) { ; ILP32D-LABEL: f: ; ILP32D: # %bb.0: diff --git a/llvm/test/CodeGen/NVPTX/i1-select.ll b/llvm/test/CodeGen/NVPTX/i1-select.ll index f1adc34..9a051b3 100644 --- a/llvm/test/CodeGen/NVPTX/i1-select.ll +++ b/llvm/test/CodeGen/NVPTX/i1-select.ll @@ -94,27 +94,27 @@ define i32 @test_select_i1_basic(i32 %v1, i32 %v2, i32 %v3, i32 %true, i32 %fals define i32 @test_select_i1_basic_folding(i32 %v1, i32 %v2, i32 %v3, i32 %true, i32 %false) { ; CHECK-LABEL: test_select_i1_basic_folding( ; CHECK: { -; CHECK-NEXT: .reg .pred %p<12>; -; CHECK-NEXT: .reg .b32 %r<9>; +; CHECK-NEXT: .reg .pred %p<13>; +; CHECK-NEXT: .reg .b32 %r<7>; ; CHECK-EMPTY: ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ld.param.b32 %r1, [test_select_i1_basic_folding_param_0]; ; CHECK-NEXT: setp.eq.b32 %p1, %r1, 0; -; CHECK-NEXT: ld.param.b32 %r3, [test_select_i1_basic_folding_param_1]; -; CHECK-NEXT: setp.ne.b32 %p2, %r3, 0; -; CHECK-NEXT: setp.eq.b32 %p3, %r3, 0; -; CHECK-NEXT: ld.param.b32 %r5, [test_select_i1_basic_folding_param_2]; -; CHECK-NEXT: setp.eq.b32 %p4, %r5, 0; -; CHECK-NEXT: ld.param.b32 %r6, [test_select_i1_basic_folding_param_3]; +; CHECK-NEXT: ld.param.b32 %r2, [test_select_i1_basic_folding_param_1]; +; CHECK-NEXT: setp.ne.b32 %p2, %r2, 0; +; CHECK-NEXT: setp.eq.b32 %p3, %r2, 0; +; CHECK-NEXT: ld.param.b32 %r3, [test_select_i1_basic_folding_param_2]; +; CHECK-NEXT: setp.eq.b32 %p4, %r3, 0; +; CHECK-NEXT: ld.param.b32 %r4, [test_select_i1_basic_folding_param_3]; ; CHECK-NEXT: xor.pred %p6, %p1, %p3; -; CHECK-NEXT: ld.param.b32 %r7, [test_select_i1_basic_folding_param_4]; +; CHECK-NEXT: ld.param.b32 %r5, [test_select_i1_basic_folding_param_4]; ; CHECK-NEXT: and.pred %p7, %p6, %p4; -; CHECK-NEXT: and.pred %p8, %p2, %p4; -; CHECK-NEXT: and.pred %p9, %p3, %p7; -; CHECK-NEXT: or.pred %p10, %p9, %p8; -; 
CHECK-NEXT: xor.pred %p11, %p10, %p3; -; CHECK-NEXT: selp.b32 %r8, %r6, %r7, %p11; -; CHECK-NEXT: st.param.b32 [func_retval0], %r8; +; CHECK-NEXT: and.pred %p9, %p2, %p4; +; CHECK-NEXT: and.pred %p10, %p3, %p7; +; CHECK-NEXT: or.pred %p11, %p10, %p9; +; CHECK-NEXT: xor.pred %p12, %p11, %p3; +; CHECK-NEXT: selp.b32 %r6, %r4, %r5, %p12; +; CHECK-NEXT: st.param.b32 [func_retval0], %r6; ; CHECK-NEXT: ret; %b1 = icmp eq i32 %v1, 0 %b2 = icmp eq i32 %v2, 0 diff --git a/llvm/test/CodeGen/NVPTX/i128.ll b/llvm/test/CodeGen/NVPTX/i128.ll index f2211eb..44d8558 100644 --- a/llvm/test/CodeGen/NVPTX/i128.ll +++ b/llvm/test/CodeGen/NVPTX/i128.ll @@ -5,9 +5,9 @@ define i128 @srem_i128(i128 %lhs, i128 %rhs) { ; CHECK-LABEL: srem_i128( ; CHECK: { -; CHECK-NEXT: .reg .pred %p<22>; +; CHECK-NEXT: .reg .pred %p<20>; ; CHECK-NEXT: .reg .b32 %r<12>; -; CHECK-NEXT: .reg .b64 %rd<126>; +; CHECK-NEXT: .reg .b64 %rd<127>; ; CHECK-EMPTY: ; CHECK-NEXT: // %bb.0: // %_udiv-special-cases ; CHECK-NEXT: ld.param.v2.b64 {%rd45, %rd46}, [srem_i128_param_0]; @@ -42,103 +42,102 @@ define i128 @srem_i128(i128 %lhs, i128 %rhs) { ; CHECK-NEXT: cvt.u64.u32 %rd62, %r4; ; CHECK-NEXT: add.s64 %rd63, %rd62, 64; ; CHECK-NEXT: selp.b64 %rd64, %rd61, %rd63, %p7; -; CHECK-NEXT: mov.b64 %rd116, 0; +; CHECK-NEXT: mov.b64 %rd117, 0; ; CHECK-NEXT: sub.cc.s64 %rd66, %rd60, %rd64; -; CHECK-NEXT: subc.cc.s64 %rd8, %rd116, 0; -; CHECK-NEXT: setp.ne.b64 %p8, %rd8, 0; -; CHECK-NEXT: and.pred %p10, %p8, %p8; -; CHECK-NEXT: setp.eq.b64 %p11, %rd8, 0; -; CHECK-NEXT: setp.gt.u64 %p12, %rd66, 127; -; CHECK-NEXT: and.pred %p13, %p11, %p12; -; CHECK-NEXT: or.pred %p14, %p13, %p10; -; CHECK-NEXT: or.pred %p15, %p5, %p14; -; CHECK-NEXT: xor.b64 %rd67, %rd66, 127; -; CHECK-NEXT: or.b64 %rd68, %rd67, %rd8; -; CHECK-NEXT: setp.eq.b64 %p16, %rd68, 0; -; CHECK-NEXT: selp.b64 %rd125, 0, %rd4, %p15; -; CHECK-NEXT: selp.b64 %rd124, 0, %rd3, %p15; -; CHECK-NEXT: or.pred %p17, %p15, %p16; -; CHECK-NEXT: @%p17 bra $L__BB0_5; +; CHECK-NEXT: subc.cc.s64 %rd67, %rd117, 0; +; CHECK-NEXT: setp.gt.u64 %p8, %rd66, 127; +; CHECK-NEXT: setp.eq.b64 %p9, %rd67, 0; +; CHECK-NEXT: and.pred %p10, %p9, %p8; +; CHECK-NEXT: setp.ne.b64 %p11, %rd67, 0; +; CHECK-NEXT: or.pred %p12, %p10, %p11; +; CHECK-NEXT: or.pred %p13, %p5, %p12; +; CHECK-NEXT: xor.b64 %rd68, %rd66, 127; +; CHECK-NEXT: or.b64 %rd69, %rd68, %rd67; +; CHECK-NEXT: setp.eq.b64 %p14, %rd69, 0; +; CHECK-NEXT: selp.b64 %rd126, 0, %rd4, %p13; +; CHECK-NEXT: selp.b64 %rd125, 0, %rd3, %p13; +; CHECK-NEXT: or.pred %p15, %p13, %p14; +; CHECK-NEXT: @%p15 bra $L__BB0_5; ; CHECK-NEXT: // %bb.3: // %udiv-bb1 -; CHECK-NEXT: add.cc.s64 %rd118, %rd66, 1; -; CHECK-NEXT: addc.cc.s64 %rd119, %rd8, 0; -; CHECK-NEXT: or.b64 %rd71, %rd118, %rd119; -; CHECK-NEXT: setp.eq.b64 %p18, %rd71, 0; +; CHECK-NEXT: add.cc.s64 %rd119, %rd66, 1; +; CHECK-NEXT: addc.cc.s64 %rd120, %rd67, 0; +; CHECK-NEXT: or.b64 %rd72, %rd119, %rd120; +; CHECK-NEXT: setp.eq.b64 %p16, %rd72, 0; ; CHECK-NEXT: cvt.u32.u64 %r5, %rd66; ; CHECK-NEXT: sub.s32 %r6, 127, %r5; -; CHECK-NEXT: shl.b64 %rd72, %rd4, %r6; +; CHECK-NEXT: shl.b64 %rd73, %rd4, %r6; ; CHECK-NEXT: sub.s32 %r7, 64, %r6; -; CHECK-NEXT: shr.u64 %rd73, %rd3, %r7; -; CHECK-NEXT: or.b64 %rd74, %rd72, %rd73; +; CHECK-NEXT: shr.u64 %rd74, %rd3, %r7; +; CHECK-NEXT: or.b64 %rd75, %rd73, %rd74; ; CHECK-NEXT: sub.s32 %r8, 63, %r5; -; CHECK-NEXT: shl.b64 %rd75, %rd3, %r8; -; CHECK-NEXT: setp.gt.s32 %p19, %r6, 63; -; CHECK-NEXT: selp.b64 %rd123, %rd75, %rd74, %p19; -; CHECK-NEXT: shl.b64 %rd122, %rd3, %r6; -; 
CHECK-NEXT: mov.b64 %rd113, %rd116; -; CHECK-NEXT: @%p18 bra $L__BB0_4; +; CHECK-NEXT: shl.b64 %rd76, %rd3, %r8; +; CHECK-NEXT: setp.gt.s32 %p17, %r6, 63; +; CHECK-NEXT: selp.b64 %rd124, %rd76, %rd75, %p17; +; CHECK-NEXT: shl.b64 %rd123, %rd3, %r6; +; CHECK-NEXT: mov.b64 %rd114, %rd117; +; CHECK-NEXT: @%p16 bra $L__BB0_4; ; CHECK-NEXT: // %bb.1: // %udiv-preheader -; CHECK-NEXT: cvt.u32.u64 %r9, %rd118; -; CHECK-NEXT: shr.u64 %rd78, %rd3, %r9; +; CHECK-NEXT: cvt.u32.u64 %r9, %rd119; +; CHECK-NEXT: shr.u64 %rd79, %rd3, %r9; ; CHECK-NEXT: sub.s32 %r10, 64, %r9; -; CHECK-NEXT: shl.b64 %rd79, %rd4, %r10; -; CHECK-NEXT: or.b64 %rd80, %rd78, %rd79; +; CHECK-NEXT: shl.b64 %rd80, %rd4, %r10; +; CHECK-NEXT: or.b64 %rd81, %rd79, %rd80; ; CHECK-NEXT: add.s32 %r11, %r9, -64; -; CHECK-NEXT: shr.u64 %rd81, %rd4, %r11; -; CHECK-NEXT: setp.gt.s32 %p20, %r9, 63; -; CHECK-NEXT: selp.b64 %rd120, %rd81, %rd80, %p20; -; CHECK-NEXT: shr.u64 %rd121, %rd4, %r9; +; CHECK-NEXT: shr.u64 %rd82, %rd4, %r11; +; CHECK-NEXT: setp.gt.s32 %p18, %r9, 63; +; CHECK-NEXT: selp.b64 %rd121, %rd82, %rd81, %p18; +; CHECK-NEXT: shr.u64 %rd122, %rd4, %r9; ; CHECK-NEXT: add.cc.s64 %rd35, %rd5, -1; ; CHECK-NEXT: addc.cc.s64 %rd36, %rd6, -1; -; CHECK-NEXT: mov.b64 %rd113, 0; -; CHECK-NEXT: mov.b64 %rd116, %rd113; +; CHECK-NEXT: mov.b64 %rd114, 0; +; CHECK-NEXT: mov.b64 %rd117, %rd114; ; CHECK-NEXT: $L__BB0_2: // %udiv-do-while ; CHECK-NEXT: // =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: shr.u64 %rd82, %rd120, 63; -; CHECK-NEXT: shl.b64 %rd83, %rd121, 1; -; CHECK-NEXT: or.b64 %rd84, %rd83, %rd82; -; CHECK-NEXT: shl.b64 %rd85, %rd120, 1; -; CHECK-NEXT: shr.u64 %rd86, %rd123, 63; -; CHECK-NEXT: or.b64 %rd87, %rd85, %rd86; -; CHECK-NEXT: shr.u64 %rd88, %rd122, 63; -; CHECK-NEXT: shl.b64 %rd89, %rd123, 1; -; CHECK-NEXT: or.b64 %rd90, %rd89, %rd88; -; CHECK-NEXT: shl.b64 %rd91, %rd122, 1; -; CHECK-NEXT: or.b64 %rd122, %rd116, %rd91; -; CHECK-NEXT: or.b64 %rd123, %rd113, %rd90; -; CHECK-NEXT: sub.cc.s64 %rd92, %rd35, %rd87; -; CHECK-NEXT: subc.cc.s64 %rd93, %rd36, %rd84; -; CHECK-NEXT: shr.s64 %rd94, %rd93, 63; -; CHECK-NEXT: and.b64 %rd116, %rd94, 1; -; CHECK-NEXT: and.b64 %rd95, %rd94, %rd5; -; CHECK-NEXT: and.b64 %rd96, %rd94, %rd6; -; CHECK-NEXT: sub.cc.s64 %rd120, %rd87, %rd95; -; CHECK-NEXT: subc.cc.s64 %rd121, %rd84, %rd96; -; CHECK-NEXT: add.cc.s64 %rd118, %rd118, -1; -; CHECK-NEXT: addc.cc.s64 %rd119, %rd119, -1; -; CHECK-NEXT: or.b64 %rd97, %rd118, %rd119; -; CHECK-NEXT: setp.eq.b64 %p21, %rd97, 0; -; CHECK-NEXT: @%p21 bra $L__BB0_4; +; CHECK-NEXT: shr.u64 %rd83, %rd121, 63; +; CHECK-NEXT: shl.b64 %rd84, %rd122, 1; +; CHECK-NEXT: or.b64 %rd85, %rd84, %rd83; +; CHECK-NEXT: shl.b64 %rd86, %rd121, 1; +; CHECK-NEXT: shr.u64 %rd87, %rd124, 63; +; CHECK-NEXT: or.b64 %rd88, %rd86, %rd87; +; CHECK-NEXT: shr.u64 %rd89, %rd123, 63; +; CHECK-NEXT: shl.b64 %rd90, %rd124, 1; +; CHECK-NEXT: or.b64 %rd91, %rd90, %rd89; +; CHECK-NEXT: shl.b64 %rd92, %rd123, 1; +; CHECK-NEXT: or.b64 %rd123, %rd117, %rd92; +; CHECK-NEXT: or.b64 %rd124, %rd114, %rd91; +; CHECK-NEXT: sub.cc.s64 %rd93, %rd35, %rd88; +; CHECK-NEXT: subc.cc.s64 %rd94, %rd36, %rd85; +; CHECK-NEXT: shr.s64 %rd95, %rd94, 63; +; CHECK-NEXT: and.b64 %rd117, %rd95, 1; +; CHECK-NEXT: and.b64 %rd96, %rd95, %rd5; +; CHECK-NEXT: and.b64 %rd97, %rd95, %rd6; +; CHECK-NEXT: sub.cc.s64 %rd121, %rd88, %rd96; +; CHECK-NEXT: subc.cc.s64 %rd122, %rd85, %rd97; +; CHECK-NEXT: add.cc.s64 %rd119, %rd119, -1; +; CHECK-NEXT: addc.cc.s64 %rd120, %rd120, -1; +; CHECK-NEXT: or.b64 %rd98, %rd119, %rd120; +; 
CHECK-NEXT: setp.eq.b64 %p19, %rd98, 0; +; CHECK-NEXT: @%p19 bra $L__BB0_4; ; CHECK-NEXT: bra.uni $L__BB0_2; ; CHECK-NEXT: $L__BB0_4: // %udiv-loop-exit -; CHECK-NEXT: shr.u64 %rd98, %rd122, 63; -; CHECK-NEXT: shl.b64 %rd99, %rd123, 1; -; CHECK-NEXT: or.b64 %rd100, %rd99, %rd98; -; CHECK-NEXT: shl.b64 %rd101, %rd122, 1; -; CHECK-NEXT: or.b64 %rd124, %rd116, %rd101; -; CHECK-NEXT: or.b64 %rd125, %rd113, %rd100; +; CHECK-NEXT: shr.u64 %rd99, %rd123, 63; +; CHECK-NEXT: shl.b64 %rd100, %rd124, 1; +; CHECK-NEXT: or.b64 %rd101, %rd100, %rd99; +; CHECK-NEXT: shl.b64 %rd102, %rd123, 1; +; CHECK-NEXT: or.b64 %rd125, %rd117, %rd102; +; CHECK-NEXT: or.b64 %rd126, %rd114, %rd101; ; CHECK-NEXT: $L__BB0_5: // %udiv-end -; CHECK-NEXT: mul.hi.u64 %rd102, %rd5, %rd124; -; CHECK-NEXT: mad.lo.s64 %rd103, %rd5, %rd125, %rd102; -; CHECK-NEXT: mad.lo.s64 %rd104, %rd6, %rd124, %rd103; -; CHECK-NEXT: mul.lo.s64 %rd105, %rd5, %rd124; -; CHECK-NEXT: sub.cc.s64 %rd106, %rd3, %rd105; -; CHECK-NEXT: subc.cc.s64 %rd107, %rd4, %rd104; -; CHECK-NEXT: xor.b64 %rd108, %rd106, %rd2; +; CHECK-NEXT: mul.hi.u64 %rd103, %rd5, %rd125; +; CHECK-NEXT: mad.lo.s64 %rd104, %rd5, %rd126, %rd103; +; CHECK-NEXT: mad.lo.s64 %rd105, %rd6, %rd125, %rd104; +; CHECK-NEXT: mul.lo.s64 %rd106, %rd5, %rd125; +; CHECK-NEXT: sub.cc.s64 %rd107, %rd3, %rd106; +; CHECK-NEXT: subc.cc.s64 %rd108, %rd4, %rd105; ; CHECK-NEXT: xor.b64 %rd109, %rd107, %rd2; -; CHECK-NEXT: sub.cc.s64 %rd110, %rd108, %rd2; -; CHECK-NEXT: subc.cc.s64 %rd111, %rd109, %rd2; -; CHECK-NEXT: st.param.v2.b64 [func_retval0], {%rd110, %rd111}; +; CHECK-NEXT: xor.b64 %rd110, %rd108, %rd2; +; CHECK-NEXT: sub.cc.s64 %rd111, %rd109, %rd2; +; CHECK-NEXT: subc.cc.s64 %rd112, %rd110, %rd2; +; CHECK-NEXT: st.param.v2.b64 [func_retval0], {%rd111, %rd112}; ; CHECK-NEXT: ret; %div = srem i128 %lhs, %rhs ret i128 %div @@ -149,7 +148,7 @@ define i128 @urem_i128(i128 %lhs, i128 %rhs) { ; CHECK: { ; CHECK-NEXT: .reg .pred %p<18>; ; CHECK-NEXT: .reg .b32 %r<12>; -; CHECK-NEXT: .reg .b64 %rd<111>; +; CHECK-NEXT: .reg .b64 %rd<113>; ; CHECK-EMPTY: ; CHECK-NEXT: // %bb.0: // %_udiv-special-cases ; CHECK-NEXT: ld.param.v2.b64 {%rd41, %rd42}, [urem_i128_param_0]; @@ -173,98 +172,98 @@ define i128 @urem_i128(i128 %lhs, i128 %rhs) { ; CHECK-NEXT: cvt.u64.u32 %rd52, %r4; ; CHECK-NEXT: add.s64 %rd53, %rd52, 64; ; CHECK-NEXT: selp.b64 %rd54, %rd51, %rd53, %p5; -; CHECK-NEXT: mov.b64 %rd101, 0; -; CHECK-NEXT: sub.cc.s64 %rd5, %rd50, %rd54; -; CHECK-NEXT: subc.cc.s64 %rd6, %rd101, 0; -; CHECK-NEXT: setp.gt.u64 %p6, %rd5, 127; -; CHECK-NEXT: setp.eq.b64 %p7, %rd6, 0; +; CHECK-NEXT: mov.b64 %rd103, 0; +; CHECK-NEXT: sub.cc.s64 %rd56, %rd50, %rd54; +; CHECK-NEXT: subc.cc.s64 %rd57, %rd103, 0; +; CHECK-NEXT: setp.gt.u64 %p6, %rd56, 127; +; CHECK-NEXT: setp.eq.b64 %p7, %rd57, 0; ; CHECK-NEXT: and.pred %p8, %p7, %p6; -; CHECK-NEXT: setp.ne.b64 %p9, %rd6, 0; +; CHECK-NEXT: setp.ne.b64 %p9, %rd57, 0; ; CHECK-NEXT: or.pred %p10, %p8, %p9; ; CHECK-NEXT: or.pred %p11, %p3, %p10; -; CHECK-NEXT: xor.b64 %rd56, %rd5, 127; -; CHECK-NEXT: or.b64 %rd57, %rd56, %rd6; -; CHECK-NEXT: setp.eq.b64 %p12, %rd57, 0; -; CHECK-NEXT: selp.b64 %rd110, 0, %rd42, %p11; -; CHECK-NEXT: selp.b64 %rd109, 0, %rd41, %p11; +; CHECK-NEXT: xor.b64 %rd58, %rd56, 127; +; CHECK-NEXT: or.b64 %rd59, %rd58, %rd57; +; CHECK-NEXT: setp.eq.b64 %p12, %rd59, 0; +; CHECK-NEXT: selp.b64 %rd112, 0, %rd42, %p11; +; CHECK-NEXT: selp.b64 %rd111, 0, %rd41, %p11; ; CHECK-NEXT: or.pred %p13, %p11, %p12; ; CHECK-NEXT: @%p13 bra $L__BB1_5; ; CHECK-NEXT: // %bb.3: // 
%udiv-bb1 -; CHECK-NEXT: add.cc.s64 %rd103, %rd5, 1; -; CHECK-NEXT: addc.cc.s64 %rd104, %rd6, 0; -; CHECK-NEXT: or.b64 %rd60, %rd103, %rd104; -; CHECK-NEXT: setp.eq.b64 %p14, %rd60, 0; -; CHECK-NEXT: cvt.u32.u64 %r5, %rd5; +; CHECK-NEXT: add.cc.s64 %rd105, %rd56, 1; +; CHECK-NEXT: addc.cc.s64 %rd106, %rd57, 0; +; CHECK-NEXT: or.b64 %rd62, %rd105, %rd106; +; CHECK-NEXT: setp.eq.b64 %p14, %rd62, 0; +; CHECK-NEXT: cvt.u32.u64 %r5, %rd56; ; CHECK-NEXT: sub.s32 %r6, 127, %r5; -; CHECK-NEXT: shl.b64 %rd61, %rd42, %r6; +; CHECK-NEXT: shl.b64 %rd63, %rd42, %r6; ; CHECK-NEXT: sub.s32 %r7, 64, %r6; -; CHECK-NEXT: shr.u64 %rd62, %rd41, %r7; -; CHECK-NEXT: or.b64 %rd63, %rd61, %rd62; +; CHECK-NEXT: shr.u64 %rd64, %rd41, %r7; +; CHECK-NEXT: or.b64 %rd65, %rd63, %rd64; ; CHECK-NEXT: sub.s32 %r8, 63, %r5; -; CHECK-NEXT: shl.b64 %rd64, %rd41, %r8; +; CHECK-NEXT: shl.b64 %rd66, %rd41, %r8; ; CHECK-NEXT: setp.gt.s32 %p15, %r6, 63; -; CHECK-NEXT: selp.b64 %rd108, %rd64, %rd63, %p15; -; CHECK-NEXT: shl.b64 %rd107, %rd41, %r6; -; CHECK-NEXT: mov.b64 %rd98, %rd101; +; CHECK-NEXT: selp.b64 %rd110, %rd66, %rd65, %p15; +; CHECK-NEXT: shl.b64 %rd109, %rd41, %r6; +; CHECK-NEXT: mov.b64 %rd100, %rd103; ; CHECK-NEXT: @%p14 bra $L__BB1_4; ; CHECK-NEXT: // %bb.1: // %udiv-preheader -; CHECK-NEXT: cvt.u32.u64 %r9, %rd103; -; CHECK-NEXT: shr.u64 %rd67, %rd41, %r9; +; CHECK-NEXT: cvt.u32.u64 %r9, %rd105; +; CHECK-NEXT: shr.u64 %rd69, %rd41, %r9; ; CHECK-NEXT: sub.s32 %r10, 64, %r9; -; CHECK-NEXT: shl.b64 %rd68, %rd42, %r10; -; CHECK-NEXT: or.b64 %rd69, %rd67, %rd68; +; CHECK-NEXT: shl.b64 %rd70, %rd42, %r10; +; CHECK-NEXT: or.b64 %rd71, %rd69, %rd70; ; CHECK-NEXT: add.s32 %r11, %r9, -64; -; CHECK-NEXT: shr.u64 %rd70, %rd42, %r11; +; CHECK-NEXT: shr.u64 %rd72, %rd42, %r11; ; CHECK-NEXT: setp.gt.s32 %p16, %r9, 63; -; CHECK-NEXT: selp.b64 %rd105, %rd70, %rd69, %p16; -; CHECK-NEXT: shr.u64 %rd106, %rd42, %r9; +; CHECK-NEXT: selp.b64 %rd107, %rd72, %rd71, %p16; +; CHECK-NEXT: shr.u64 %rd108, %rd42, %r9; ; CHECK-NEXT: add.cc.s64 %rd33, %rd3, -1; ; CHECK-NEXT: addc.cc.s64 %rd34, %rd4, -1; -; CHECK-NEXT: mov.b64 %rd98, 0; -; CHECK-NEXT: mov.b64 %rd101, %rd98; +; CHECK-NEXT: mov.b64 %rd100, 0; +; CHECK-NEXT: mov.b64 %rd103, %rd100; ; CHECK-NEXT: $L__BB1_2: // %udiv-do-while ; CHECK-NEXT: // =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: shr.u64 %rd71, %rd105, 63; -; CHECK-NEXT: shl.b64 %rd72, %rd106, 1; -; CHECK-NEXT: or.b64 %rd73, %rd72, %rd71; -; CHECK-NEXT: shl.b64 %rd74, %rd105, 1; -; CHECK-NEXT: shr.u64 %rd75, %rd108, 63; -; CHECK-NEXT: or.b64 %rd76, %rd74, %rd75; -; CHECK-NEXT: shr.u64 %rd77, %rd107, 63; -; CHECK-NEXT: shl.b64 %rd78, %rd108, 1; -; CHECK-NEXT: or.b64 %rd79, %rd78, %rd77; -; CHECK-NEXT: shl.b64 %rd80, %rd107, 1; -; CHECK-NEXT: or.b64 %rd107, %rd101, %rd80; -; CHECK-NEXT: or.b64 %rd108, %rd98, %rd79; -; CHECK-NEXT: sub.cc.s64 %rd81, %rd33, %rd76; -; CHECK-NEXT: subc.cc.s64 %rd82, %rd34, %rd73; -; CHECK-NEXT: shr.s64 %rd83, %rd82, 63; -; CHECK-NEXT: and.b64 %rd101, %rd83, 1; -; CHECK-NEXT: and.b64 %rd84, %rd83, %rd3; -; CHECK-NEXT: and.b64 %rd85, %rd83, %rd4; -; CHECK-NEXT: sub.cc.s64 %rd105, %rd76, %rd84; -; CHECK-NEXT: subc.cc.s64 %rd106, %rd73, %rd85; -; CHECK-NEXT: add.cc.s64 %rd103, %rd103, -1; -; CHECK-NEXT: addc.cc.s64 %rd104, %rd104, -1; -; CHECK-NEXT: or.b64 %rd86, %rd103, %rd104; -; CHECK-NEXT: setp.eq.b64 %p17, %rd86, 0; +; CHECK-NEXT: shr.u64 %rd73, %rd107, 63; +; CHECK-NEXT: shl.b64 %rd74, %rd108, 1; +; CHECK-NEXT: or.b64 %rd75, %rd74, %rd73; +; CHECK-NEXT: shl.b64 %rd76, %rd107, 1; +; CHECK-NEXT: 
shr.u64 %rd77, %rd110, 63; +; CHECK-NEXT: or.b64 %rd78, %rd76, %rd77; +; CHECK-NEXT: shr.u64 %rd79, %rd109, 63; +; CHECK-NEXT: shl.b64 %rd80, %rd110, 1; +; CHECK-NEXT: or.b64 %rd81, %rd80, %rd79; +; CHECK-NEXT: shl.b64 %rd82, %rd109, 1; +; CHECK-NEXT: or.b64 %rd109, %rd103, %rd82; +; CHECK-NEXT: or.b64 %rd110, %rd100, %rd81; +; CHECK-NEXT: sub.cc.s64 %rd83, %rd33, %rd78; +; CHECK-NEXT: subc.cc.s64 %rd84, %rd34, %rd75; +; CHECK-NEXT: shr.s64 %rd85, %rd84, 63; +; CHECK-NEXT: and.b64 %rd103, %rd85, 1; +; CHECK-NEXT: and.b64 %rd86, %rd85, %rd3; +; CHECK-NEXT: and.b64 %rd87, %rd85, %rd4; +; CHECK-NEXT: sub.cc.s64 %rd107, %rd78, %rd86; +; CHECK-NEXT: subc.cc.s64 %rd108, %rd75, %rd87; +; CHECK-NEXT: add.cc.s64 %rd105, %rd105, -1; +; CHECK-NEXT: addc.cc.s64 %rd106, %rd106, -1; +; CHECK-NEXT: or.b64 %rd88, %rd105, %rd106; +; CHECK-NEXT: setp.eq.b64 %p17, %rd88, 0; ; CHECK-NEXT: @%p17 bra $L__BB1_4; ; CHECK-NEXT: bra.uni $L__BB1_2; ; CHECK-NEXT: $L__BB1_4: // %udiv-loop-exit -; CHECK-NEXT: shr.u64 %rd87, %rd107, 63; -; CHECK-NEXT: shl.b64 %rd88, %rd108, 1; -; CHECK-NEXT: or.b64 %rd89, %rd88, %rd87; -; CHECK-NEXT: shl.b64 %rd90, %rd107, 1; -; CHECK-NEXT: or.b64 %rd109, %rd101, %rd90; -; CHECK-NEXT: or.b64 %rd110, %rd98, %rd89; +; CHECK-NEXT: shr.u64 %rd89, %rd109, 63; +; CHECK-NEXT: shl.b64 %rd90, %rd110, 1; +; CHECK-NEXT: or.b64 %rd91, %rd90, %rd89; +; CHECK-NEXT: shl.b64 %rd92, %rd109, 1; +; CHECK-NEXT: or.b64 %rd111, %rd103, %rd92; +; CHECK-NEXT: or.b64 %rd112, %rd100, %rd91; ; CHECK-NEXT: $L__BB1_5: // %udiv-end -; CHECK-NEXT: mul.hi.u64 %rd91, %rd3, %rd109; -; CHECK-NEXT: mad.lo.s64 %rd92, %rd3, %rd110, %rd91; -; CHECK-NEXT: mad.lo.s64 %rd93, %rd4, %rd109, %rd92; -; CHECK-NEXT: mul.lo.s64 %rd94, %rd3, %rd109; -; CHECK-NEXT: sub.cc.s64 %rd95, %rd41, %rd94; -; CHECK-NEXT: subc.cc.s64 %rd96, %rd42, %rd93; -; CHECK-NEXT: st.param.v2.b64 [func_retval0], {%rd95, %rd96}; +; CHECK-NEXT: mul.hi.u64 %rd93, %rd3, %rd111; +; CHECK-NEXT: mad.lo.s64 %rd94, %rd3, %rd112, %rd93; +; CHECK-NEXT: mad.lo.s64 %rd95, %rd4, %rd111, %rd94; +; CHECK-NEXT: mul.lo.s64 %rd96, %rd3, %rd111; +; CHECK-NEXT: sub.cc.s64 %rd97, %rd41, %rd96; +; CHECK-NEXT: subc.cc.s64 %rd98, %rd42, %rd95; +; CHECK-NEXT: st.param.v2.b64 [func_retval0], {%rd97, %rd98}; ; CHECK-NEXT: ret; %div = urem i128 %lhs, %rhs ret i128 %div @@ -307,9 +306,9 @@ define i128 @urem_i128_pow2k(i128 %lhs) { define i128 @sdiv_i128(i128 %lhs, i128 %rhs) { ; CHECK-LABEL: sdiv_i128( ; CHECK: { -; CHECK-NEXT: .reg .pred %p<22>; +; CHECK-NEXT: .reg .pred %p<20>; ; CHECK-NEXT: .reg .b32 %r<12>; -; CHECK-NEXT: .reg .b64 %rd<121>; +; CHECK-NEXT: .reg .b64 %rd<122>; ; CHECK-EMPTY: ; CHECK-NEXT: // %bb.0: // %_udiv-special-cases ; CHECK-NEXT: ld.param.v2.b64 {%rd45, %rd46}, [sdiv_i128_param_0]; @@ -345,97 +344,96 @@ define i128 @sdiv_i128(i128 %lhs, i128 %rhs) { ; CHECK-NEXT: cvt.u64.u32 %rd63, %r4; ; CHECK-NEXT: add.s64 %rd64, %rd63, 64; ; CHECK-NEXT: selp.b64 %rd65, %rd62, %rd64, %p7; -; CHECK-NEXT: mov.b64 %rd111, 0; +; CHECK-NEXT: mov.b64 %rd112, 0; ; CHECK-NEXT: sub.cc.s64 %rd67, %rd61, %rd65; -; CHECK-NEXT: subc.cc.s64 %rd8, %rd111, 0; -; CHECK-NEXT: setp.ne.b64 %p8, %rd8, 0; -; CHECK-NEXT: and.pred %p10, %p8, %p8; -; CHECK-NEXT: setp.eq.b64 %p11, %rd8, 0; -; CHECK-NEXT: setp.gt.u64 %p12, %rd67, 127; -; CHECK-NEXT: and.pred %p13, %p11, %p12; -; CHECK-NEXT: or.pred %p14, %p13, %p10; -; CHECK-NEXT: or.pred %p15, %p5, %p14; -; CHECK-NEXT: xor.b64 %rd68, %rd67, 127; -; CHECK-NEXT: or.b64 %rd69, %rd68, %rd8; -; CHECK-NEXT: setp.eq.b64 %p16, %rd69, 0; -; CHECK-NEXT: selp.b64 
%rd120, 0, %rd2, %p15; -; CHECK-NEXT: selp.b64 %rd119, 0, %rd1, %p15; -; CHECK-NEXT: or.pred %p17, %p15, %p16; -; CHECK-NEXT: @%p17 bra $L__BB4_5; +; CHECK-NEXT: subc.cc.s64 %rd68, %rd112, 0; +; CHECK-NEXT: setp.gt.u64 %p8, %rd67, 127; +; CHECK-NEXT: setp.eq.b64 %p9, %rd68, 0; +; CHECK-NEXT: and.pred %p10, %p9, %p8; +; CHECK-NEXT: setp.ne.b64 %p11, %rd68, 0; +; CHECK-NEXT: or.pred %p12, %p10, %p11; +; CHECK-NEXT: or.pred %p13, %p5, %p12; +; CHECK-NEXT: xor.b64 %rd69, %rd67, 127; +; CHECK-NEXT: or.b64 %rd70, %rd69, %rd68; +; CHECK-NEXT: setp.eq.b64 %p14, %rd70, 0; +; CHECK-NEXT: selp.b64 %rd121, 0, %rd2, %p13; +; CHECK-NEXT: selp.b64 %rd120, 0, %rd1, %p13; +; CHECK-NEXT: or.pred %p15, %p13, %p14; +; CHECK-NEXT: @%p15 bra $L__BB4_5; ; CHECK-NEXT: // %bb.3: // %udiv-bb1 -; CHECK-NEXT: add.cc.s64 %rd113, %rd67, 1; -; CHECK-NEXT: addc.cc.s64 %rd114, %rd8, 0; -; CHECK-NEXT: or.b64 %rd72, %rd113, %rd114; -; CHECK-NEXT: setp.eq.b64 %p18, %rd72, 0; +; CHECK-NEXT: add.cc.s64 %rd114, %rd67, 1; +; CHECK-NEXT: addc.cc.s64 %rd115, %rd68, 0; +; CHECK-NEXT: or.b64 %rd73, %rd114, %rd115; +; CHECK-NEXT: setp.eq.b64 %p16, %rd73, 0; ; CHECK-NEXT: cvt.u32.u64 %r5, %rd67; ; CHECK-NEXT: sub.s32 %r6, 127, %r5; -; CHECK-NEXT: shl.b64 %rd73, %rd2, %r6; +; CHECK-NEXT: shl.b64 %rd74, %rd2, %r6; ; CHECK-NEXT: sub.s32 %r7, 64, %r6; -; CHECK-NEXT: shr.u64 %rd74, %rd1, %r7; -; CHECK-NEXT: or.b64 %rd75, %rd73, %rd74; +; CHECK-NEXT: shr.u64 %rd75, %rd1, %r7; +; CHECK-NEXT: or.b64 %rd76, %rd74, %rd75; ; CHECK-NEXT: sub.s32 %r8, 63, %r5; -; CHECK-NEXT: shl.b64 %rd76, %rd1, %r8; -; CHECK-NEXT: setp.gt.s32 %p19, %r6, 63; -; CHECK-NEXT: selp.b64 %rd118, %rd76, %rd75, %p19; -; CHECK-NEXT: shl.b64 %rd117, %rd1, %r6; -; CHECK-NEXT: mov.b64 %rd108, %rd111; -; CHECK-NEXT: @%p18 bra $L__BB4_4; +; CHECK-NEXT: shl.b64 %rd77, %rd1, %r8; +; CHECK-NEXT: setp.gt.s32 %p17, %r6, 63; +; CHECK-NEXT: selp.b64 %rd119, %rd77, %rd76, %p17; +; CHECK-NEXT: shl.b64 %rd118, %rd1, %r6; +; CHECK-NEXT: mov.b64 %rd109, %rd112; +; CHECK-NEXT: @%p16 bra $L__BB4_4; ; CHECK-NEXT: // %bb.1: // %udiv-preheader -; CHECK-NEXT: cvt.u32.u64 %r9, %rd113; -; CHECK-NEXT: shr.u64 %rd79, %rd1, %r9; +; CHECK-NEXT: cvt.u32.u64 %r9, %rd114; +; CHECK-NEXT: shr.u64 %rd80, %rd1, %r9; ; CHECK-NEXT: sub.s32 %r10, 64, %r9; -; CHECK-NEXT: shl.b64 %rd80, %rd2, %r10; -; CHECK-NEXT: or.b64 %rd81, %rd79, %rd80; +; CHECK-NEXT: shl.b64 %rd81, %rd2, %r10; +; CHECK-NEXT: or.b64 %rd82, %rd80, %rd81; ; CHECK-NEXT: add.s32 %r11, %r9, -64; -; CHECK-NEXT: shr.u64 %rd82, %rd2, %r11; -; CHECK-NEXT: setp.gt.s32 %p20, %r9, 63; -; CHECK-NEXT: selp.b64 %rd115, %rd82, %rd81, %p20; -; CHECK-NEXT: shr.u64 %rd116, %rd2, %r9; +; CHECK-NEXT: shr.u64 %rd83, %rd2, %r11; +; CHECK-NEXT: setp.gt.s32 %p18, %r9, 63; +; CHECK-NEXT: selp.b64 %rd116, %rd83, %rd82, %p18; +; CHECK-NEXT: shr.u64 %rd117, %rd2, %r9; ; CHECK-NEXT: add.cc.s64 %rd35, %rd3, -1; ; CHECK-NEXT: addc.cc.s64 %rd36, %rd4, -1; -; CHECK-NEXT: mov.b64 %rd108, 0; -; CHECK-NEXT: mov.b64 %rd111, %rd108; +; CHECK-NEXT: mov.b64 %rd109, 0; +; CHECK-NEXT: mov.b64 %rd112, %rd109; ; CHECK-NEXT: $L__BB4_2: // %udiv-do-while ; CHECK-NEXT: // =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: shr.u64 %rd83, %rd115, 63; -; CHECK-NEXT: shl.b64 %rd84, %rd116, 1; -; CHECK-NEXT: or.b64 %rd85, %rd84, %rd83; -; CHECK-NEXT: shl.b64 %rd86, %rd115, 1; -; CHECK-NEXT: shr.u64 %rd87, %rd118, 63; -; CHECK-NEXT: or.b64 %rd88, %rd86, %rd87; -; CHECK-NEXT: shr.u64 %rd89, %rd117, 63; -; CHECK-NEXT: shl.b64 %rd90, %rd118, 1; -; CHECK-NEXT: or.b64 %rd91, %rd90, %rd89; -; 
CHECK-NEXT: shl.b64 %rd92, %rd117, 1; -; CHECK-NEXT: or.b64 %rd117, %rd111, %rd92; -; CHECK-NEXT: or.b64 %rd118, %rd108, %rd91; -; CHECK-NEXT: sub.cc.s64 %rd93, %rd35, %rd88; -; CHECK-NEXT: subc.cc.s64 %rd94, %rd36, %rd85; -; CHECK-NEXT: shr.s64 %rd95, %rd94, 63; -; CHECK-NEXT: and.b64 %rd111, %rd95, 1; -; CHECK-NEXT: and.b64 %rd96, %rd95, %rd3; -; CHECK-NEXT: and.b64 %rd97, %rd95, %rd4; -; CHECK-NEXT: sub.cc.s64 %rd115, %rd88, %rd96; -; CHECK-NEXT: subc.cc.s64 %rd116, %rd85, %rd97; -; CHECK-NEXT: add.cc.s64 %rd113, %rd113, -1; -; CHECK-NEXT: addc.cc.s64 %rd114, %rd114, -1; -; CHECK-NEXT: or.b64 %rd98, %rd113, %rd114; -; CHECK-NEXT: setp.eq.b64 %p21, %rd98, 0; -; CHECK-NEXT: @%p21 bra $L__BB4_4; +; CHECK-NEXT: shr.u64 %rd84, %rd116, 63; +; CHECK-NEXT: shl.b64 %rd85, %rd117, 1; +; CHECK-NEXT: or.b64 %rd86, %rd85, %rd84; +; CHECK-NEXT: shl.b64 %rd87, %rd116, 1; +; CHECK-NEXT: shr.u64 %rd88, %rd119, 63; +; CHECK-NEXT: or.b64 %rd89, %rd87, %rd88; +; CHECK-NEXT: shr.u64 %rd90, %rd118, 63; +; CHECK-NEXT: shl.b64 %rd91, %rd119, 1; +; CHECK-NEXT: or.b64 %rd92, %rd91, %rd90; +; CHECK-NEXT: shl.b64 %rd93, %rd118, 1; +; CHECK-NEXT: or.b64 %rd118, %rd112, %rd93; +; CHECK-NEXT: or.b64 %rd119, %rd109, %rd92; +; CHECK-NEXT: sub.cc.s64 %rd94, %rd35, %rd89; +; CHECK-NEXT: subc.cc.s64 %rd95, %rd36, %rd86; +; CHECK-NEXT: shr.s64 %rd96, %rd95, 63; +; CHECK-NEXT: and.b64 %rd112, %rd96, 1; +; CHECK-NEXT: and.b64 %rd97, %rd96, %rd3; +; CHECK-NEXT: and.b64 %rd98, %rd96, %rd4; +; CHECK-NEXT: sub.cc.s64 %rd116, %rd89, %rd97; +; CHECK-NEXT: subc.cc.s64 %rd117, %rd86, %rd98; +; CHECK-NEXT: add.cc.s64 %rd114, %rd114, -1; +; CHECK-NEXT: addc.cc.s64 %rd115, %rd115, -1; +; CHECK-NEXT: or.b64 %rd99, %rd114, %rd115; +; CHECK-NEXT: setp.eq.b64 %p19, %rd99, 0; +; CHECK-NEXT: @%p19 bra $L__BB4_4; ; CHECK-NEXT: bra.uni $L__BB4_2; ; CHECK-NEXT: $L__BB4_4: // %udiv-loop-exit -; CHECK-NEXT: shr.u64 %rd99, %rd117, 63; -; CHECK-NEXT: shl.b64 %rd100, %rd118, 1; -; CHECK-NEXT: or.b64 %rd101, %rd100, %rd99; -; CHECK-NEXT: shl.b64 %rd102, %rd117, 1; -; CHECK-NEXT: or.b64 %rd119, %rd111, %rd102; -; CHECK-NEXT: or.b64 %rd120, %rd108, %rd101; +; CHECK-NEXT: shr.u64 %rd100, %rd118, 63; +; CHECK-NEXT: shl.b64 %rd101, %rd119, 1; +; CHECK-NEXT: or.b64 %rd102, %rd101, %rd100; +; CHECK-NEXT: shl.b64 %rd103, %rd118, 1; +; CHECK-NEXT: or.b64 %rd120, %rd112, %rd103; +; CHECK-NEXT: or.b64 %rd121, %rd109, %rd102; ; CHECK-NEXT: $L__BB4_5: // %udiv-end -; CHECK-NEXT: xor.b64 %rd103, %rd119, %rd5; ; CHECK-NEXT: xor.b64 %rd104, %rd120, %rd5; -; CHECK-NEXT: sub.cc.s64 %rd105, %rd103, %rd5; -; CHECK-NEXT: subc.cc.s64 %rd106, %rd104, %rd5; -; CHECK-NEXT: st.param.v2.b64 [func_retval0], {%rd105, %rd106}; +; CHECK-NEXT: xor.b64 %rd105, %rd121, %rd5; +; CHECK-NEXT: sub.cc.s64 %rd106, %rd104, %rd5; +; CHECK-NEXT: subc.cc.s64 %rd107, %rd105, %rd5; +; CHECK-NEXT: st.param.v2.b64 [func_retval0], {%rd106, %rd107}; ; CHECK-NEXT: ret; %div = sdiv i128 %lhs, %rhs ret i128 %div @@ -446,7 +444,7 @@ define i128 @udiv_i128(i128 %lhs, i128 %rhs) { ; CHECK: { ; CHECK-NEXT: .reg .pred %p<18>; ; CHECK-NEXT: .reg .b32 %r<12>; -; CHECK-NEXT: .reg .b64 %rd<105>; +; CHECK-NEXT: .reg .b64 %rd<107>; ; CHECK-EMPTY: ; CHECK-NEXT: // %bb.0: // %_udiv-special-cases ; CHECK-NEXT: ld.param.v2.b64 {%rd41, %rd42}, [udiv_i128_param_0]; @@ -470,92 +468,92 @@ define i128 @udiv_i128(i128 %lhs, i128 %rhs) { ; CHECK-NEXT: cvt.u64.u32 %rd52, %r4; ; CHECK-NEXT: add.s64 %rd53, %rd52, 64; ; CHECK-NEXT: selp.b64 %rd54, %rd51, %rd53, %p5; -; CHECK-NEXT: mov.b64 %rd95, 0; -; CHECK-NEXT: sub.cc.s64 
%rd5, %rd50, %rd54; -; CHECK-NEXT: subc.cc.s64 %rd6, %rd95, 0; -; CHECK-NEXT: setp.gt.u64 %p6, %rd5, 127; -; CHECK-NEXT: setp.eq.b64 %p7, %rd6, 0; +; CHECK-NEXT: mov.b64 %rd97, 0; +; CHECK-NEXT: sub.cc.s64 %rd56, %rd50, %rd54; +; CHECK-NEXT: subc.cc.s64 %rd57, %rd97, 0; +; CHECK-NEXT: setp.gt.u64 %p6, %rd56, 127; +; CHECK-NEXT: setp.eq.b64 %p7, %rd57, 0; ; CHECK-NEXT: and.pred %p8, %p7, %p6; -; CHECK-NEXT: setp.ne.b64 %p9, %rd6, 0; +; CHECK-NEXT: setp.ne.b64 %p9, %rd57, 0; ; CHECK-NEXT: or.pred %p10, %p8, %p9; ; CHECK-NEXT: or.pred %p11, %p3, %p10; -; CHECK-NEXT: xor.b64 %rd56, %rd5, 127; -; CHECK-NEXT: or.b64 %rd57, %rd56, %rd6; -; CHECK-NEXT: setp.eq.b64 %p12, %rd57, 0; -; CHECK-NEXT: selp.b64 %rd104, 0, %rd42, %p11; -; CHECK-NEXT: selp.b64 %rd103, 0, %rd41, %p11; +; CHECK-NEXT: xor.b64 %rd58, %rd56, 127; +; CHECK-NEXT: or.b64 %rd59, %rd58, %rd57; +; CHECK-NEXT: setp.eq.b64 %p12, %rd59, 0; +; CHECK-NEXT: selp.b64 %rd106, 0, %rd42, %p11; +; CHECK-NEXT: selp.b64 %rd105, 0, %rd41, %p11; ; CHECK-NEXT: or.pred %p13, %p11, %p12; ; CHECK-NEXT: @%p13 bra $L__BB5_5; ; CHECK-NEXT: // %bb.3: // %udiv-bb1 -; CHECK-NEXT: add.cc.s64 %rd97, %rd5, 1; -; CHECK-NEXT: addc.cc.s64 %rd98, %rd6, 0; -; CHECK-NEXT: or.b64 %rd60, %rd97, %rd98; -; CHECK-NEXT: setp.eq.b64 %p14, %rd60, 0; -; CHECK-NEXT: cvt.u32.u64 %r5, %rd5; +; CHECK-NEXT: add.cc.s64 %rd99, %rd56, 1; +; CHECK-NEXT: addc.cc.s64 %rd100, %rd57, 0; +; CHECK-NEXT: or.b64 %rd62, %rd99, %rd100; +; CHECK-NEXT: setp.eq.b64 %p14, %rd62, 0; +; CHECK-NEXT: cvt.u32.u64 %r5, %rd56; ; CHECK-NEXT: sub.s32 %r6, 127, %r5; -; CHECK-NEXT: shl.b64 %rd61, %rd42, %r6; +; CHECK-NEXT: shl.b64 %rd63, %rd42, %r6; ; CHECK-NEXT: sub.s32 %r7, 64, %r6; -; CHECK-NEXT: shr.u64 %rd62, %rd41, %r7; -; CHECK-NEXT: or.b64 %rd63, %rd61, %rd62; +; CHECK-NEXT: shr.u64 %rd64, %rd41, %r7; +; CHECK-NEXT: or.b64 %rd65, %rd63, %rd64; ; CHECK-NEXT: sub.s32 %r8, 63, %r5; -; CHECK-NEXT: shl.b64 %rd64, %rd41, %r8; +; CHECK-NEXT: shl.b64 %rd66, %rd41, %r8; ; CHECK-NEXT: setp.gt.s32 %p15, %r6, 63; -; CHECK-NEXT: selp.b64 %rd102, %rd64, %rd63, %p15; -; CHECK-NEXT: shl.b64 %rd101, %rd41, %r6; -; CHECK-NEXT: mov.b64 %rd92, %rd95; +; CHECK-NEXT: selp.b64 %rd104, %rd66, %rd65, %p15; +; CHECK-NEXT: shl.b64 %rd103, %rd41, %r6; +; CHECK-NEXT: mov.b64 %rd94, %rd97; ; CHECK-NEXT: @%p14 bra $L__BB5_4; ; CHECK-NEXT: // %bb.1: // %udiv-preheader -; CHECK-NEXT: cvt.u32.u64 %r9, %rd97; -; CHECK-NEXT: shr.u64 %rd67, %rd41, %r9; +; CHECK-NEXT: cvt.u32.u64 %r9, %rd99; +; CHECK-NEXT: shr.u64 %rd69, %rd41, %r9; ; CHECK-NEXT: sub.s32 %r10, 64, %r9; -; CHECK-NEXT: shl.b64 %rd68, %rd42, %r10; -; CHECK-NEXT: or.b64 %rd69, %rd67, %rd68; +; CHECK-NEXT: shl.b64 %rd70, %rd42, %r10; +; CHECK-NEXT: or.b64 %rd71, %rd69, %rd70; ; CHECK-NEXT: add.s32 %r11, %r9, -64; -; CHECK-NEXT: shr.u64 %rd70, %rd42, %r11; +; CHECK-NEXT: shr.u64 %rd72, %rd42, %r11; ; CHECK-NEXT: setp.gt.s32 %p16, %r9, 63; -; CHECK-NEXT: selp.b64 %rd99, %rd70, %rd69, %p16; -; CHECK-NEXT: shr.u64 %rd100, %rd42, %r9; +; CHECK-NEXT: selp.b64 %rd101, %rd72, %rd71, %p16; +; CHECK-NEXT: shr.u64 %rd102, %rd42, %r9; ; CHECK-NEXT: add.cc.s64 %rd33, %rd43, -1; ; CHECK-NEXT: addc.cc.s64 %rd34, %rd44, -1; -; CHECK-NEXT: mov.b64 %rd92, 0; -; CHECK-NEXT: mov.b64 %rd95, %rd92; +; CHECK-NEXT: mov.b64 %rd94, 0; +; CHECK-NEXT: mov.b64 %rd97, %rd94; ; CHECK-NEXT: $L__BB5_2: // %udiv-do-while ; CHECK-NEXT: // =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: shr.u64 %rd71, %rd99, 63; -; CHECK-NEXT: shl.b64 %rd72, %rd100, 1; -; CHECK-NEXT: or.b64 %rd73, %rd72, %rd71; -; CHECK-NEXT: 
shl.b64 %rd74, %rd99, 1; -; CHECK-NEXT: shr.u64 %rd75, %rd102, 63; -; CHECK-NEXT: or.b64 %rd76, %rd74, %rd75; -; CHECK-NEXT: shr.u64 %rd77, %rd101, 63; -; CHECK-NEXT: shl.b64 %rd78, %rd102, 1; -; CHECK-NEXT: or.b64 %rd79, %rd78, %rd77; -; CHECK-NEXT: shl.b64 %rd80, %rd101, 1; -; CHECK-NEXT: or.b64 %rd101, %rd95, %rd80; -; CHECK-NEXT: or.b64 %rd102, %rd92, %rd79; -; CHECK-NEXT: sub.cc.s64 %rd81, %rd33, %rd76; -; CHECK-NEXT: subc.cc.s64 %rd82, %rd34, %rd73; -; CHECK-NEXT: shr.s64 %rd83, %rd82, 63; -; CHECK-NEXT: and.b64 %rd95, %rd83, 1; -; CHECK-NEXT: and.b64 %rd84, %rd83, %rd43; -; CHECK-NEXT: and.b64 %rd85, %rd83, %rd44; -; CHECK-NEXT: sub.cc.s64 %rd99, %rd76, %rd84; -; CHECK-NEXT: subc.cc.s64 %rd100, %rd73, %rd85; -; CHECK-NEXT: add.cc.s64 %rd97, %rd97, -1; -; CHECK-NEXT: addc.cc.s64 %rd98, %rd98, -1; -; CHECK-NEXT: or.b64 %rd86, %rd97, %rd98; -; CHECK-NEXT: setp.eq.b64 %p17, %rd86, 0; +; CHECK-NEXT: shr.u64 %rd73, %rd101, 63; +; CHECK-NEXT: shl.b64 %rd74, %rd102, 1; +; CHECK-NEXT: or.b64 %rd75, %rd74, %rd73; +; CHECK-NEXT: shl.b64 %rd76, %rd101, 1; +; CHECK-NEXT: shr.u64 %rd77, %rd104, 63; +; CHECK-NEXT: or.b64 %rd78, %rd76, %rd77; +; CHECK-NEXT: shr.u64 %rd79, %rd103, 63; +; CHECK-NEXT: shl.b64 %rd80, %rd104, 1; +; CHECK-NEXT: or.b64 %rd81, %rd80, %rd79; +; CHECK-NEXT: shl.b64 %rd82, %rd103, 1; +; CHECK-NEXT: or.b64 %rd103, %rd97, %rd82; +; CHECK-NEXT: or.b64 %rd104, %rd94, %rd81; +; CHECK-NEXT: sub.cc.s64 %rd83, %rd33, %rd78; +; CHECK-NEXT: subc.cc.s64 %rd84, %rd34, %rd75; +; CHECK-NEXT: shr.s64 %rd85, %rd84, 63; +; CHECK-NEXT: and.b64 %rd97, %rd85, 1; +; CHECK-NEXT: and.b64 %rd86, %rd85, %rd43; +; CHECK-NEXT: and.b64 %rd87, %rd85, %rd44; +; CHECK-NEXT: sub.cc.s64 %rd101, %rd78, %rd86; +; CHECK-NEXT: subc.cc.s64 %rd102, %rd75, %rd87; +; CHECK-NEXT: add.cc.s64 %rd99, %rd99, -1; +; CHECK-NEXT: addc.cc.s64 %rd100, %rd100, -1; +; CHECK-NEXT: or.b64 %rd88, %rd99, %rd100; +; CHECK-NEXT: setp.eq.b64 %p17, %rd88, 0; ; CHECK-NEXT: @%p17 bra $L__BB5_4; ; CHECK-NEXT: bra.uni $L__BB5_2; ; CHECK-NEXT: $L__BB5_4: // %udiv-loop-exit -; CHECK-NEXT: shr.u64 %rd87, %rd101, 63; -; CHECK-NEXT: shl.b64 %rd88, %rd102, 1; -; CHECK-NEXT: or.b64 %rd89, %rd88, %rd87; -; CHECK-NEXT: shl.b64 %rd90, %rd101, 1; -; CHECK-NEXT: or.b64 %rd103, %rd95, %rd90; -; CHECK-NEXT: or.b64 %rd104, %rd92, %rd89; +; CHECK-NEXT: shr.u64 %rd89, %rd103, 63; +; CHECK-NEXT: shl.b64 %rd90, %rd104, 1; +; CHECK-NEXT: or.b64 %rd91, %rd90, %rd89; +; CHECK-NEXT: shl.b64 %rd92, %rd103, 1; +; CHECK-NEXT: or.b64 %rd105, %rd97, %rd92; +; CHECK-NEXT: or.b64 %rd106, %rd94, %rd91; ; CHECK-NEXT: $L__BB5_5: // %udiv-end -; CHECK-NEXT: st.param.v2.b64 [func_retval0], {%rd103, %rd104}; +; CHECK-NEXT: st.param.v2.b64 [func_retval0], {%rd105, %rd106}; ; CHECK-NEXT: ret; %div = udiv i128 %lhs, %rhs ret i128 %div diff --git a/llvm/test/CodeGen/PowerPC/ppc64-P9-vabsd.ll b/llvm/test/CodeGen/PowerPC/ppc64-P9-vabsd.ll index 821cfd0..b540948 100644 --- a/llvm/test/CodeGen/PowerPC/ppc64-P9-vabsd.ll +++ b/llvm/test/CodeGen/PowerPC/ppc64-P9-vabsd.ll @@ -764,8 +764,13 @@ define <16 x i8> @sub_absv_8_ext(<16 x i8> %a, <16 x i8> %b) local_unnamed_addr ; ; CHECK-PWR7-LABEL: sub_absv_8_ext: ; CHECK-PWR7: # %bb.0: # %entry -; CHECK-PWR7-NEXT: stdu r1, -448(r1) -; CHECK-PWR7-NEXT: .cfi_def_cfa_offset 448 +; CHECK-PWR7-NEXT: stdu r1, -512(r1) +; CHECK-PWR7-NEXT: .cfi_def_cfa_offset 512 +; CHECK-PWR7-NEXT: .cfi_offset r14, -144 +; CHECK-PWR7-NEXT: .cfi_offset r15, -136 +; CHECK-PWR7-NEXT: .cfi_offset r16, -128 +; CHECK-PWR7-NEXT: .cfi_offset r17, -120 +; CHECK-PWR7-NEXT: 
.cfi_offset r18, -112 ; CHECK-PWR7-NEXT: .cfi_offset r19, -104 ; CHECK-PWR7-NEXT: .cfi_offset r20, -96 ; CHECK-PWR7-NEXT: .cfi_offset r21, -88 @@ -778,258 +783,244 @@ define <16 x i8> @sub_absv_8_ext(<16 x i8> %a, <16 x i8> %b) local_unnamed_addr ; CHECK-PWR7-NEXT: .cfi_offset r28, -32 ; CHECK-PWR7-NEXT: .cfi_offset r29, -24 ; CHECK-PWR7-NEXT: .cfi_offset r30, -16 -; CHECK-PWR7-NEXT: addi r3, r1, 304 -; CHECK-PWR7-NEXT: std r19, 344(r1) # 8-byte Folded Spill -; CHECK-PWR7-NEXT: std r20, 352(r1) # 8-byte Folded Spill -; CHECK-PWR7-NEXT: std r21, 360(r1) # 8-byte Folded Spill -; CHECK-PWR7-NEXT: std r22, 368(r1) # 8-byte Folded Spill -; CHECK-PWR7-NEXT: std r23, 376(r1) # 8-byte Folded Spill -; CHECK-PWR7-NEXT: std r24, 384(r1) # 8-byte Folded Spill -; CHECK-PWR7-NEXT: std r25, 392(r1) # 8-byte Folded Spill -; CHECK-PWR7-NEXT: std r26, 400(r1) # 8-byte Folded Spill -; CHECK-PWR7-NEXT: std r27, 408(r1) # 8-byte Folded Spill -; CHECK-PWR7-NEXT: std r28, 416(r1) # 8-byte Folded Spill -; CHECK-PWR7-NEXT: std r29, 424(r1) # 8-byte Folded Spill -; CHECK-PWR7-NEXT: std r30, 432(r1) # 8-byte Folded Spill -; CHECK-PWR7-NEXT: stxvw4x v2, 0, r3 +; CHECK-PWR7-NEXT: .cfi_offset r31, -8 +; CHECK-PWR7-NEXT: .cfi_offset r2, -152 ; CHECK-PWR7-NEXT: addi r3, r1, 320 -; CHECK-PWR7-NEXT: lbz r7, 304(r1) -; CHECK-PWR7-NEXT: stxvw4x v3, 0, r3 -; CHECK-PWR7-NEXT: lbz r8, 320(r1) -; CHECK-PWR7-NEXT: lbz r9, 305(r1) -; CHECK-PWR7-NEXT: lbz r10, 321(r1) -; CHECK-PWR7-NEXT: lbz r26, 325(r1) -; CHECK-PWR7-NEXT: clrlwi r7, r7, 24 -; CHECK-PWR7-NEXT: clrlwi r8, r8, 24 -; CHECK-PWR7-NEXT: clrlwi r9, r9, 24 -; CHECK-PWR7-NEXT: clrlwi r10, r10, 24 -; CHECK-PWR7-NEXT: lbz r11, 306(r1) -; CHECK-PWR7-NEXT: lbz r12, 322(r1) -; CHECK-PWR7-NEXT: lbz r23, 314(r1) -; CHECK-PWR7-NEXT: clrlwi r22, r26, 24 -; CHECK-PWR7-NEXT: lbz r26, 330(r1) -; CHECK-PWR7-NEXT: sub r8, r7, r8 -; CHECK-PWR7-NEXT: lbz r7, 315(r1) -; CHECK-PWR7-NEXT: sub r20, r9, r10 -; CHECK-PWR7-NEXT: lbz r9, 331(r1) -; CHECK-PWR7-NEXT: lbz r0, 307(r1) -; CHECK-PWR7-NEXT: lbz r30, 323(r1) -; CHECK-PWR7-NEXT: clrlwi r11, r11, 24 -; CHECK-PWR7-NEXT: clrlwi r12, r12, 24 -; CHECK-PWR7-NEXT: clrlwi r23, r23, 24 -; CHECK-PWR7-NEXT: clrlwi r21, r26, 24 -; CHECK-PWR7-NEXT: clrlwi r7, r7, 24 -; CHECK-PWR7-NEXT: clrlwi r9, r9, 24 -; CHECK-PWR7-NEXT: clrlwi r0, r0, 24 -; CHECK-PWR7-NEXT: clrlwi r30, r30, 24 -; CHECK-PWR7-NEXT: lbz r29, 308(r1) -; CHECK-PWR7-NEXT: lbz r28, 324(r1) -; CHECK-PWR7-NEXT: lbz r27, 309(r1) -; CHECK-PWR7-NEXT: lbz r25, 310(r1) -; CHECK-PWR7-NEXT: lbz r24, 326(r1) -; CHECK-PWR7-NEXT: sub r19, r11, r12 -; CHECK-PWR7-NEXT: sub r11, r23, r21 -; CHECK-PWR7-NEXT: sub r9, r7, r9 -; CHECK-PWR7-NEXT: sub r26, r0, r30 -; CHECK-PWR7-NEXT: srawi r12, r11, 31 -; CHECK-PWR7-NEXT: srawi r0, r9, 31 -; CHECK-PWR7-NEXT: lbz r3, 312(r1) -; CHECK-PWR7-NEXT: clrlwi r29, r29, 24 -; CHECK-PWR7-NEXT: clrlwi r28, r28, 24 -; CHECK-PWR7-NEXT: clrlwi r27, r27, 24 -; CHECK-PWR7-NEXT: clrlwi r25, r25, 24 -; CHECK-PWR7-NEXT: clrlwi r24, r24, 24 -; CHECK-PWR7-NEXT: xor r11, r11, r12 -; CHECK-PWR7-NEXT: xor r9, r9, r0 -; CHECK-PWR7-NEXT: sub r28, r29, r28 -; CHECK-PWR7-NEXT: sub r30, r27, r22 -; CHECK-PWR7-NEXT: sub r29, r25, r24 -; CHECK-PWR7-NEXT: sub r27, r11, r12 -; CHECK-PWR7-NEXT: sub r24, r9, r0 -; CHECK-PWR7-NEXT: lbz r9, 316(r1) -; CHECK-PWR7-NEXT: lbz r11, 332(r1) -; CHECK-PWR7-NEXT: lbz r4, 328(r1) -; CHECK-PWR7-NEXT: lbz r5, 311(r1) -; CHECK-PWR7-NEXT: lbz r6, 327(r1) -; CHECK-PWR7-NEXT: clrlwi r11, r11, 24 -; CHECK-PWR7-NEXT: clrlwi r3, r3, 24 -; CHECK-PWR7-NEXT: clrlwi 
r4, r4, 24 -; CHECK-PWR7-NEXT: clrlwi r5, r5, 24 -; CHECK-PWR7-NEXT: clrlwi r6, r6, 24 -; CHECK-PWR7-NEXT: sub r3, r3, r4 +; CHECK-PWR7-NEXT: std r14, 368(r1) # 8-byte Folded Spill +; CHECK-PWR7-NEXT: std r15, 376(r1) # 8-byte Folded Spill +; CHECK-PWR7-NEXT: std r16, 384(r1) # 8-byte Folded Spill +; CHECK-PWR7-NEXT: std r17, 392(r1) # 8-byte Folded Spill +; CHECK-PWR7-NEXT: std r18, 400(r1) # 8-byte Folded Spill +; CHECK-PWR7-NEXT: std r19, 408(r1) # 8-byte Folded Spill +; CHECK-PWR7-NEXT: std r20, 416(r1) # 8-byte Folded Spill +; CHECK-PWR7-NEXT: std r21, 424(r1) # 8-byte Folded Spill +; CHECK-PWR7-NEXT: std r22, 432(r1) # 8-byte Folded Spill +; CHECK-PWR7-NEXT: std r23, 440(r1) # 8-byte Folded Spill +; CHECK-PWR7-NEXT: std r24, 448(r1) # 8-byte Folded Spill +; CHECK-PWR7-NEXT: std r25, 456(r1) # 8-byte Folded Spill +; CHECK-PWR7-NEXT: std r26, 464(r1) # 8-byte Folded Spill +; CHECK-PWR7-NEXT: std r27, 472(r1) # 8-byte Folded Spill +; CHECK-PWR7-NEXT: std r28, 480(r1) # 8-byte Folded Spill +; CHECK-PWR7-NEXT: std r29, 488(r1) # 8-byte Folded Spill +; CHECK-PWR7-NEXT: std r30, 496(r1) # 8-byte Folded Spill +; CHECK-PWR7-NEXT: std r31, 504(r1) # 8-byte Folded Spill +; CHECK-PWR7-NEXT: std r2, 360(r1) # 8-byte Folded Spill +; CHECK-PWR7-NEXT: stxvw4x v2, 0, r3 +; CHECK-PWR7-NEXT: lbz r3, 320(r1) +; CHECK-PWR7-NEXT: addi r4, r1, 336 +; CHECK-PWR7-NEXT: stw r3, 60(r1) # 4-byte Folded Spill +; CHECK-PWR7-NEXT: stxvw4x v3, 0, r4 +; CHECK-PWR7-NEXT: lbz r15, 334(r1) +; CHECK-PWR7-NEXT: lbz r14, 350(r1) +; CHECK-PWR7-NEXT: lbz r31, 335(r1) +; CHECK-PWR7-NEXT: lbz r2, 351(r1) +; CHECK-PWR7-NEXT: sub r15, r15, r14 +; CHECK-PWR7-NEXT: sub r14, r31, r2 +; CHECK-PWR7-NEXT: srawi r2, r14, 31 +; CHECK-PWR7-NEXT: xor r14, r14, r2 +; CHECK-PWR7-NEXT: lbz r3, 333(r1) +; CHECK-PWR7-NEXT: lbz r19, 331(r1) +; CHECK-PWR7-NEXT: lbz r18, 347(r1) +; CHECK-PWR7-NEXT: sub r19, r19, r18 +; CHECK-PWR7-NEXT: lbz r17, 332(r1) +; CHECK-PWR7-NEXT: lbz r16, 348(r1) +; CHECK-PWR7-NEXT: sub r17, r17, r16 +; CHECK-PWR7-NEXT: lbz r23, 329(r1) +; CHECK-PWR7-NEXT: sub r14, r14, r2 +; CHECK-PWR7-NEXT: lbz r2, 349(r1) +; CHECK-PWR7-NEXT: lbz r22, 345(r1) +; CHECK-PWR7-NEXT: lbz r4, 336(r1) +; CHECK-PWR7-NEXT: lbz r5, 321(r1) +; CHECK-PWR7-NEXT: lbz r6, 337(r1) +; CHECK-PWR7-NEXT: lbz r7, 322(r1) +; CHECK-PWR7-NEXT: lbz r8, 338(r1) +; CHECK-PWR7-NEXT: lbz r9, 323(r1) +; CHECK-PWR7-NEXT: lbz r10, 339(r1) +; CHECK-PWR7-NEXT: lbz r11, 324(r1) +; CHECK-PWR7-NEXT: lbz r12, 340(r1) +; CHECK-PWR7-NEXT: lbz r0, 325(r1) +; CHECK-PWR7-NEXT: lbz r30, 341(r1) +; CHECK-PWR7-NEXT: lbz r29, 326(r1) +; CHECK-PWR7-NEXT: lbz r28, 342(r1) +; CHECK-PWR7-NEXT: lbz r27, 327(r1) +; CHECK-PWR7-NEXT: lbz r26, 343(r1) +; CHECK-PWR7-NEXT: sub r3, r3, r2 +; CHECK-PWR7-NEXT: lbz r25, 328(r1) +; CHECK-PWR7-NEXT: lbz r24, 344(r1) +; CHECK-PWR7-NEXT: lbz r21, 330(r1) +; CHECK-PWR7-NEXT: lbz r20, 346(r1) ; CHECK-PWR7-NEXT: sub r5, r5, r6 -; CHECK-PWR7-NEXT: clrlwi r9, r9, 24 -; CHECK-PWR7-NEXT: srawi r4, r3, 31 +; CHECK-PWR7-NEXT: srawi r18, r3, 31 +; CHECK-PWR7-NEXT: sub r7, r7, r8 +; CHECK-PWR7-NEXT: sub r9, r9, r10 +; CHECK-PWR7-NEXT: sub r11, r11, r12 +; CHECK-PWR7-NEXT: sub r0, r0, r30 +; CHECK-PWR7-NEXT: sub r29, r29, r28 +; CHECK-PWR7-NEXT: sub r27, r27, r26 +; CHECK-PWR7-NEXT: sub r25, r25, r24 +; CHECK-PWR7-NEXT: srawi r31, r15, 31 +; CHECK-PWR7-NEXT: ld r2, 360(r1) # 8-byte Folded Reload +; CHECK-PWR7-NEXT: xor r3, r3, r18 ; CHECK-PWR7-NEXT: srawi r6, r5, 31 -; CHECK-PWR7-NEXT: xor r3, r3, r4 -; CHECK-PWR7-NEXT: sldi r27, r27, 56 -; CHECK-PWR7-NEXT: xor 
r5, r5, r6 -; CHECK-PWR7-NEXT: sub r9, r9, r11 -; CHECK-PWR7-NEXT: sub r3, r3, r4 -; CHECK-PWR7-NEXT: sldi r24, r24, 56 +; CHECK-PWR7-NEXT: srawi r8, r7, 31 +; CHECK-PWR7-NEXT: srawi r10, r9, 31 +; CHECK-PWR7-NEXT: srawi r12, r11, 31 +; CHECK-PWR7-NEXT: srawi r30, r0, 31 +; CHECK-PWR7-NEXT: sub r3, r3, r18 +; CHECK-PWR7-NEXT: srawi r18, r19, 31 +; CHECK-PWR7-NEXT: srawi r28, r29, 31 +; CHECK-PWR7-NEXT: ld r16, 384(r1) # 8-byte Folded Reload ; CHECK-PWR7-NEXT: sldi r3, r3, 56 -; CHECK-PWR7-NEXT: srawi r11, r9, 31 -; CHECK-PWR7-NEXT: std r27, 208(r1) -; CHECK-PWR7-NEXT: sub r4, r5, r6 -; CHECK-PWR7-NEXT: std r27, 216(r1) -; CHECK-PWR7-NEXT: srawi r27, r29, 31 -; CHECK-PWR7-NEXT: lbz r10, 313(r1) -; CHECK-PWR7-NEXT: xor r9, r9, r11 -; CHECK-PWR7-NEXT: std r24, 224(r1) -; CHECK-PWR7-NEXT: lbz r22, 329(r1) -; CHECK-PWR7-NEXT: std r24, 232(r1) -; CHECK-PWR7-NEXT: srawi r24, r30, 31 -; CHECK-PWR7-NEXT: ld r21, 360(r1) # 8-byte Folded Reload -; CHECK-PWR7-NEXT: sub r23, r9, r11 -; CHECK-PWR7-NEXT: lbz r9, 317(r1) -; CHECK-PWR7-NEXT: lbz r11, 333(r1) -; CHECK-PWR7-NEXT: xor r29, r29, r27 -; CHECK-PWR7-NEXT: std r3, 176(r1) -; CHECK-PWR7-NEXT: std r3, 184(r1) -; CHECK-PWR7-NEXT: sldi r3, r4, 56 -; CHECK-PWR7-NEXT: sldi r23, r23, 56 -; CHECK-PWR7-NEXT: xor r30, r30, r24 -; CHECK-PWR7-NEXT: clrlwi r9, r9, 24 -; CHECK-PWR7-NEXT: clrlwi r11, r11, 24 -; CHECK-PWR7-NEXT: sub r4, r30, r24 -; CHECK-PWR7-NEXT: ld r30, 432(r1) # 8-byte Folded Reload -; CHECK-PWR7-NEXT: std r3, 160(r1) -; CHECK-PWR7-NEXT: std r3, 168(r1) -; CHECK-PWR7-NEXT: sub r9, r9, r11 -; CHECK-PWR7-NEXT: sub r3, r29, r27 -; CHECK-PWR7-NEXT: std r23, 240(r1) -; CHECK-PWR7-NEXT: ld r29, 424(r1) # 8-byte Folded Reload -; CHECK-PWR7-NEXT: srawi r11, r9, 31 -; CHECK-PWR7-NEXT: std r23, 248(r1) -; CHECK-PWR7-NEXT: ld r27, 408(r1) # 8-byte Folded Reload -; CHECK-PWR7-NEXT: srawi r23, r28, 31 +; CHECK-PWR7-NEXT: srawi r26, r27, 31 +; CHECK-PWR7-NEXT: srawi r24, r25, 31 +; CHECK-PWR7-NEXT: xor r19, r19, r18 +; CHECK-PWR7-NEXT: xor r15, r15, r31 +; CHECK-PWR7-NEXT: xor r5, r5, r6 +; CHECK-PWR7-NEXT: std r3, 272(r1) +; CHECK-PWR7-NEXT: std r3, 280(r1) +; CHECK-PWR7-NEXT: srawi r3, r17, 31 +; CHECK-PWR7-NEXT: sub r19, r19, r18 +; CHECK-PWR7-NEXT: xor r7, r7, r8 +; CHECK-PWR7-NEXT: sub r15, r15, r31 +; CHECK-PWR7-NEXT: xor r17, r17, r3 +; CHECK-PWR7-NEXT: xor r9, r9, r10 +; CHECK-PWR7-NEXT: xor r11, r11, r12 +; CHECK-PWR7-NEXT: xor r0, r0, r30 +; CHECK-PWR7-NEXT: xor r29, r29, r28 +; CHECK-PWR7-NEXT: xor r27, r27, r26 +; CHECK-PWR7-NEXT: sub r3, r17, r3 +; CHECK-PWR7-NEXT: xor r25, r25, r24 +; CHECK-PWR7-NEXT: sub r25, r25, r24 +; CHECK-PWR7-NEXT: sub r27, r27, r26 +; CHECK-PWR7-NEXT: sub r29, r29, r28 ; CHECK-PWR7-NEXT: sldi r3, r3, 56 -; CHECK-PWR7-NEXT: xor r28, r28, r23 -; CHECK-PWR7-NEXT: xor r9, r9, r11 -; CHECK-PWR7-NEXT: std r3, 144(r1) -; CHECK-PWR7-NEXT: ld r24, 384(r1) # 8-byte Folded Reload -; CHECK-PWR7-NEXT: std r3, 152(r1) -; CHECK-PWR7-NEXT: sldi r3, r4, 56 -; CHECK-PWR7-NEXT: sub r25, r9, r11 -; CHECK-PWR7-NEXT: lbz r9, 318(r1) -; CHECK-PWR7-NEXT: lbz r11, 334(r1) -; CHECK-PWR7-NEXT: std r3, 128(r1) +; CHECK-PWR7-NEXT: sub r0, r0, r30 +; CHECK-PWR7-NEXT: sub r11, r11, r12 +; CHECK-PWR7-NEXT: sub r9, r9, r10 +; CHECK-PWR7-NEXT: sub r7, r7, r8 +; CHECK-PWR7-NEXT: sub r5, r5, r6 +; CHECK-PWR7-NEXT: sldi r14, r14, 56 +; CHECK-PWR7-NEXT: sldi r15, r15, 56 +; CHECK-PWR7-NEXT: ld r31, 504(r1) # 8-byte Folded Reload +; CHECK-PWR7-NEXT: std r3, 256(r1) +; CHECK-PWR7-NEXT: std r3, 264(r1) +; CHECK-PWR7-NEXT: sldi r3, r19, 56 ; CHECK-PWR7-NEXT: 
sldi r25, r25, 56 -; CHECK-PWR7-NEXT: std r3, 136(r1) -; CHECK-PWR7-NEXT: sub r3, r28, r23 +; CHECK-PWR7-NEXT: sldi r27, r27, 56 +; CHECK-PWR7-NEXT: std r3, 240(r1) +; CHECK-PWR7-NEXT: std r3, 248(r1) +; CHECK-PWR7-NEXT: sub r3, r23, r22 +; CHECK-PWR7-NEXT: srawi r23, r3, 31 +; CHECK-PWR7-NEXT: sub r22, r21, r20 +; CHECK-PWR7-NEXT: srawi r21, r22, 31 +; CHECK-PWR7-NEXT: sldi r29, r29, 56 +; CHECK-PWR7-NEXT: sldi r0, r0, 56 +; CHECK-PWR7-NEXT: sldi r11, r11, 56 +; CHECK-PWR7-NEXT: xor r3, r3, r23 +; CHECK-PWR7-NEXT: xor r22, r22, r21 +; CHECK-PWR7-NEXT: sldi r9, r9, 56 +; CHECK-PWR7-NEXT: sldi r7, r7, 56 +; CHECK-PWR7-NEXT: sldi r5, r5, 56 +; CHECK-PWR7-NEXT: ld r30, 496(r1) # 8-byte Folded Reload +; CHECK-PWR7-NEXT: ld r28, 480(r1) # 8-byte Folded Reload +; CHECK-PWR7-NEXT: sub r3, r3, r23 +; CHECK-PWR7-NEXT: sub r22, r22, r21 +; CHECK-PWR7-NEXT: std r14, 304(r1) +; CHECK-PWR7-NEXT: ld r26, 464(r1) # 8-byte Folded Reload ; CHECK-PWR7-NEXT: sldi r3, r3, 56 -; CHECK-PWR7-NEXT: std r3, 112(r1) -; CHECK-PWR7-NEXT: ld r28, 416(r1) # 8-byte Folded Reload -; CHECK-PWR7-NEXT: clrlwi r9, r9, 24 -; CHECK-PWR7-NEXT: clrlwi r11, r11, 24 -; CHECK-PWR7-NEXT: clrlwi r10, r10, 24 -; CHECK-PWR7-NEXT: std r25, 256(r1) -; CHECK-PWR7-NEXT: std r25, 264(r1) -; CHECK-PWR7-NEXT: sub r9, r9, r11 -; CHECK-PWR7-NEXT: srawi r25, r26, 31 -; CHECK-PWR7-NEXT: xor r26, r26, r25 -; CHECK-PWR7-NEXT: ld r23, 376(r1) # 8-byte Folded Reload -; CHECK-PWR7-NEXT: srawi r11, r9, 31 -; CHECK-PWR7-NEXT: std r3, 120(r1) -; CHECK-PWR7-NEXT: sub r4, r26, r25 -; CHECK-PWR7-NEXT: clrlwi r22, r22, 24 -; CHECK-PWR7-NEXT: srawi r7, r8, 31 -; CHECK-PWR7-NEXT: sub r10, r10, r22 -; CHECK-PWR7-NEXT: ld r26, 400(r1) # 8-byte Folded Reload -; CHECK-PWR7-NEXT: xor r9, r9, r11 -; CHECK-PWR7-NEXT: sldi r3, r4, 56 -; CHECK-PWR7-NEXT: srawi r22, r10, 31 -; CHECK-PWR7-NEXT: xor r8, r8, r7 -; CHECK-PWR7-NEXT: xor r10, r10, r22 -; CHECK-PWR7-NEXT: sub r10, r10, r22 -; CHECK-PWR7-NEXT: ld r25, 392(r1) # 8-byte Folded Reload -; CHECK-PWR7-NEXT: sub r12, r9, r11 -; CHECK-PWR7-NEXT: lbz r9, 319(r1) -; CHECK-PWR7-NEXT: lbz r11, 335(r1) -; CHECK-PWR7-NEXT: std r3, 96(r1) -; CHECK-PWR7-NEXT: sldi r12, r12, 56 -; CHECK-PWR7-NEXT: std r3, 104(r1) -; CHECK-PWR7-NEXT: ld r22, 368(r1) # 8-byte Folded Reload -; CHECK-PWR7-NEXT: sldi r10, r10, 56 -; CHECK-PWR7-NEXT: std r10, 192(r1) -; CHECK-PWR7-NEXT: clrlwi r9, r9, 24 -; CHECK-PWR7-NEXT: clrlwi r11, r11, 24 -; CHECK-PWR7-NEXT: sub r9, r9, r11 -; CHECK-PWR7-NEXT: std r12, 272(r1) -; CHECK-PWR7-NEXT: std r12, 280(r1) -; CHECK-PWR7-NEXT: srawi r12, r19, 31 -; CHECK-PWR7-NEXT: xor r0, r19, r12 -; CHECK-PWR7-NEXT: ld r19, 344(r1) # 8-byte Folded Reload -; CHECK-PWR7-NEXT: sub r3, r0, r12 -; CHECK-PWR7-NEXT: srawi r11, r9, 31 -; CHECK-PWR7-NEXT: std r10, 200(r1) -; CHECK-PWR7-NEXT: xor r9, r9, r11 +; CHECK-PWR7-NEXT: sldi r22, r22, 56 +; CHECK-PWR7-NEXT: ld r24, 448(r1) # 8-byte Folded Reload +; CHECK-PWR7-NEXT: ld r23, 440(r1) # 8-byte Folded Reload +; CHECK-PWR7-NEXT: std r14, 312(r1) +; CHECK-PWR7-NEXT: std r15, 288(r1) +; CHECK-PWR7-NEXT: std r3, 208(r1) +; CHECK-PWR7-NEXT: std r3, 216(r1) +; CHECK-PWR7-NEXT: lwz r3, 60(r1) # 4-byte Folded Reload +; CHECK-PWR7-NEXT: std r15, 296(r1) +; CHECK-PWR7-NEXT: ld r21, 424(r1) # 8-byte Folded Reload +; CHECK-PWR7-NEXT: ld r20, 416(r1) # 8-byte Folded Reload +; CHECK-PWR7-NEXT: std r22, 224(r1) +; CHECK-PWR7-NEXT: std r22, 232(r1) +; CHECK-PWR7-NEXT: sub r4, r3, r4 +; CHECK-PWR7-NEXT: std r25, 192(r1) +; CHECK-PWR7-NEXT: ld r22, 432(r1) # 8-byte Folded Reload +; 
CHECK-PWR7-NEXT: ld r19, 408(r1) # 8-byte Folded Reload +; CHECK-PWR7-NEXT: srawi r3, r4, 31 +; CHECK-PWR7-NEXT: std r25, 200(r1) +; CHECK-PWR7-NEXT: ld r25, 456(r1) # 8-byte Folded Reload +; CHECK-PWR7-NEXT: std r27, 176(r1) +; CHECK-PWR7-NEXT: std r27, 184(r1) +; CHECK-PWR7-NEXT: xor r4, r4, r3 +; CHECK-PWR7-NEXT: std r29, 160(r1) +; CHECK-PWR7-NEXT: ld r27, 472(r1) # 8-byte Folded Reload +; CHECK-PWR7-NEXT: std r29, 168(r1) +; CHECK-PWR7-NEXT: std r0, 144(r1) +; CHECK-PWR7-NEXT: sub r3, r4, r3 +; CHECK-PWR7-NEXT: std r0, 152(r1) +; CHECK-PWR7-NEXT: ld r29, 488(r1) # 8-byte Folded Reload +; CHECK-PWR7-NEXT: ld r18, 400(r1) # 8-byte Folded Reload ; CHECK-PWR7-NEXT: sldi r3, r3, 56 -; CHECK-PWR7-NEXT: sub r9, r9, r11 -; CHECK-PWR7-NEXT: std r3, 80(r1) -; CHECK-PWR7-NEXT: std r3, 88(r1) -; CHECK-PWR7-NEXT: sldi r9, r9, 56 -; CHECK-PWR7-NEXT: std r9, 288(r1) -; CHECK-PWR7-NEXT: std r9, 296(r1) -; CHECK-PWR7-NEXT: srawi r9, r20, 31 -; CHECK-PWR7-NEXT: xor r11, r20, r9 -; CHECK-PWR7-NEXT: ld r20, 352(r1) # 8-byte Folded Reload -; CHECK-PWR7-NEXT: sub r4, r11, r9 -; CHECK-PWR7-NEXT: sldi r3, r4, 56 +; CHECK-PWR7-NEXT: std r11, 128(r1) +; CHECK-PWR7-NEXT: ld r17, 392(r1) # 8-byte Folded Reload +; CHECK-PWR7-NEXT: std r11, 136(r1) +; CHECK-PWR7-NEXT: std r9, 112(r1) ; CHECK-PWR7-NEXT: std r3, 64(r1) ; CHECK-PWR7-NEXT: std r3, 72(r1) -; CHECK-PWR7-NEXT: sub r3, r8, r7 -; CHECK-PWR7-NEXT: sldi r3, r3, 56 -; CHECK-PWR7-NEXT: std r3, 48(r1) -; CHECK-PWR7-NEXT: std r3, 56(r1) -; CHECK-PWR7-NEXT: addi r3, r1, 288 +; CHECK-PWR7-NEXT: addi r3, r1, 304 +; CHECK-PWR7-NEXT: std r9, 120(r1) +; CHECK-PWR7-NEXT: ld r15, 376(r1) # 8-byte Folded Reload +; CHECK-PWR7-NEXT: std r7, 96(r1) +; CHECK-PWR7-NEXT: std r7, 104(r1) +; CHECK-PWR7-NEXT: std r5, 80(r1) +; CHECK-PWR7-NEXT: std r5, 88(r1) ; CHECK-PWR7-NEXT: lxvw4x v2, 0, r3 -; CHECK-PWR7-NEXT: addi r3, r1, 272 +; CHECK-PWR7-NEXT: addi r3, r1, 288 ; CHECK-PWR7-NEXT: lxvw4x v3, 0, r3 -; CHECK-PWR7-NEXT: addi r3, r1, 256 +; CHECK-PWR7-NEXT: addi r3, r1, 272 +; CHECK-PWR7-NEXT: ld r14, 368(r1) # 8-byte Folded Reload ; CHECK-PWR7-NEXT: vmrghb v2, v3, v2 ; CHECK-PWR7-NEXT: lxvw4x v3, 0, r3 -; CHECK-PWR7-NEXT: addi r3, r1, 240 +; CHECK-PWR7-NEXT: addi r3, r1, 256 ; CHECK-PWR7-NEXT: lxvw4x v4, 0, r3 -; CHECK-PWR7-NEXT: addi r3, r1, 224 +; CHECK-PWR7-NEXT: addi r3, r1, 240 ; CHECK-PWR7-NEXT: vmrghb v3, v4, v3 ; CHECK-PWR7-NEXT: vmrghh v2, v3, v2 ; CHECK-PWR7-NEXT: lxvw4x v3, 0, r3 -; CHECK-PWR7-NEXT: addi r3, r1, 208 +; CHECK-PWR7-NEXT: addi r3, r1, 224 ; CHECK-PWR7-NEXT: lxvw4x v4, 0, r3 -; CHECK-PWR7-NEXT: addi r3, r1, 192 +; CHECK-PWR7-NEXT: addi r3, r1, 208 ; CHECK-PWR7-NEXT: vmrghb v3, v4, v3 ; CHECK-PWR7-NEXT: lxvw4x v4, 0, r3 -; CHECK-PWR7-NEXT: addi r3, r1, 176 +; CHECK-PWR7-NEXT: addi r3, r1, 192 ; CHECK-PWR7-NEXT: lxvw4x v5, 0, r3 -; CHECK-PWR7-NEXT: addi r3, r1, 160 +; CHECK-PWR7-NEXT: addi r3, r1, 176 ; CHECK-PWR7-NEXT: vmrghb v4, v5, v4 ; CHECK-PWR7-NEXT: vmrghh v3, v4, v3 ; CHECK-PWR7-NEXT: xxmrghw vs0, v3, v2 ; CHECK-PWR7-NEXT: lxvw4x v2, 0, r3 -; CHECK-PWR7-NEXT: addi r3, r1, 144 +; CHECK-PWR7-NEXT: addi r3, r1, 160 ; CHECK-PWR7-NEXT: lxvw4x v3, 0, r3 -; CHECK-PWR7-NEXT: addi r3, r1, 128 +; CHECK-PWR7-NEXT: addi r3, r1, 144 ; CHECK-PWR7-NEXT: vmrghb v2, v3, v2 ; CHECK-PWR7-NEXT: lxvw4x v3, 0, r3 -; CHECK-PWR7-NEXT: addi r3, r1, 112 +; CHECK-PWR7-NEXT: addi r3, r1, 128 ; CHECK-PWR7-NEXT: lxvw4x v4, 0, r3 -; CHECK-PWR7-NEXT: addi r3, r1, 96 ; CHECK-PWR7-NEXT: vmrghb v3, v4, v3 +; CHECK-PWR7-NEXT: addi r3, r1, 112 ; CHECK-PWR7-NEXT: vmrghh v2, v3, v2 ; 
CHECK-PWR7-NEXT: lxvw4x v3, 0, r3 -; CHECK-PWR7-NEXT: addi r3, r1, 80 +; CHECK-PWR7-NEXT: addi r3, r1, 96 ; CHECK-PWR7-NEXT: lxvw4x v4, 0, r3 -; CHECK-PWR7-NEXT: addi r3, r1, 64 +; CHECK-PWR7-NEXT: addi r3, r1, 80 ; CHECK-PWR7-NEXT: vmrghb v3, v4, v3 ; CHECK-PWR7-NEXT: lxvw4x v4, 0, r3 -; CHECK-PWR7-NEXT: addi r3, r1, 48 +; CHECK-PWR7-NEXT: addi r3, r1, 64 ; CHECK-PWR7-NEXT: lxvw4x v5, 0, r3 ; CHECK-PWR7-NEXT: vmrghb v4, v5, v4 ; CHECK-PWR7-NEXT: vmrghh v3, v4, v3 ; CHECK-PWR7-NEXT: xxmrghw vs1, v3, v2 ; CHECK-PWR7-NEXT: xxmrghd v2, vs1, vs0 -; CHECK-PWR7-NEXT: addi r1, r1, 448 +; CHECK-PWR7-NEXT: addi r1, r1, 512 ; CHECK-PWR7-NEXT: blr entry: %vecext = extractelement <16 x i8> %a, i32 0 diff --git a/llvm/test/CodeGen/RISCV/fpclamptosat.ll b/llvm/test/CodeGen/RISCV/fpclamptosat.ll index 246e6a6..117e3e4 100644 --- a/llvm/test/CodeGen/RISCV/fpclamptosat.ll +++ b/llvm/test/CodeGen/RISCV/fpclamptosat.ll @@ -3292,30 +3292,30 @@ define i64 @ustest_f64i64_mm(double %x) { ; RV32IF-NEXT: mv a1, a0 ; RV32IF-NEXT: addi a0, sp, 8 ; RV32IF-NEXT: call __fixdfti -; RV32IF-NEXT: lw a0, 8(sp) -; RV32IF-NEXT: lw a1, 12(sp) -; RV32IF-NEXT: lw a2, 20(sp) +; RV32IF-NEXT: lw a0, 20(sp) +; RV32IF-NEXT: lw a1, 8(sp) +; RV32IF-NEXT: lw a2, 12(sp) ; RV32IF-NEXT: lw a3, 16(sp) -; RV32IF-NEXT: beqz a2, .LBB47_2 +; RV32IF-NEXT: beqz a0, .LBB47_2 ; RV32IF-NEXT: # %bb.1: # %entry -; RV32IF-NEXT: slti a4, a2, 0 +; RV32IF-NEXT: slti a4, a0, 0 ; RV32IF-NEXT: j .LBB47_3 ; RV32IF-NEXT: .LBB47_2: ; RV32IF-NEXT: seqz a4, a3 ; RV32IF-NEXT: .LBB47_3: # %entry ; RV32IF-NEXT: xori a3, a3, 1 -; RV32IF-NEXT: or a3, a3, a2 +; RV32IF-NEXT: or a3, a3, a0 ; RV32IF-NEXT: seqz a3, a3 ; RV32IF-NEXT: addi a3, a3, -1 ; RV32IF-NEXT: and a3, a3, a4 ; RV32IF-NEXT: neg a3, a3 +; RV32IF-NEXT: and a2, a3, a2 ; RV32IF-NEXT: and a1, a3, a1 ; RV32IF-NEXT: and a0, a3, a0 -; RV32IF-NEXT: and a2, a3, a2 -; RV32IF-NEXT: slti a2, a2, 0 -; RV32IF-NEXT: addi a2, a2, -1 -; RV32IF-NEXT: and a0, a2, a0 -; RV32IF-NEXT: and a1, a2, a1 +; RV32IF-NEXT: slti a0, a0, 0 +; RV32IF-NEXT: addi a3, a0, -1 +; RV32IF-NEXT: and a0, a3, a1 +; RV32IF-NEXT: and a1, a3, a2 ; RV32IF-NEXT: lw ra, 28(sp) # 4-byte Folded Reload ; RV32IF-NEXT: .cfi_restore ra ; RV32IF-NEXT: addi sp, sp, 32 @@ -3354,30 +3354,30 @@ define i64 @ustest_f64i64_mm(double %x) { ; RV32IFD-NEXT: .cfi_offset ra, -4 ; RV32IFD-NEXT: addi a0, sp, 8 ; RV32IFD-NEXT: call __fixdfti -; RV32IFD-NEXT: lw a0, 8(sp) -; RV32IFD-NEXT: lw a1, 12(sp) -; RV32IFD-NEXT: lw a2, 20(sp) +; RV32IFD-NEXT: lw a0, 20(sp) +; RV32IFD-NEXT: lw a1, 8(sp) +; RV32IFD-NEXT: lw a2, 12(sp) ; RV32IFD-NEXT: lw a3, 16(sp) -; RV32IFD-NEXT: beqz a2, .LBB47_2 +; RV32IFD-NEXT: beqz a0, .LBB47_2 ; RV32IFD-NEXT: # %bb.1: # %entry -; RV32IFD-NEXT: slti a4, a2, 0 +; RV32IFD-NEXT: slti a4, a0, 0 ; RV32IFD-NEXT: j .LBB47_3 ; RV32IFD-NEXT: .LBB47_2: ; RV32IFD-NEXT: seqz a4, a3 ; RV32IFD-NEXT: .LBB47_3: # %entry ; RV32IFD-NEXT: xori a3, a3, 1 -; RV32IFD-NEXT: or a3, a3, a2 +; RV32IFD-NEXT: or a3, a3, a0 ; RV32IFD-NEXT: seqz a3, a3 ; RV32IFD-NEXT: addi a3, a3, -1 ; RV32IFD-NEXT: and a3, a3, a4 ; RV32IFD-NEXT: neg a3, a3 +; RV32IFD-NEXT: and a2, a3, a2 ; RV32IFD-NEXT: and a1, a3, a1 ; RV32IFD-NEXT: and a0, a3, a0 -; RV32IFD-NEXT: and a2, a3, a2 -; RV32IFD-NEXT: slti a2, a2, 0 -; RV32IFD-NEXT: addi a2, a2, -1 -; RV32IFD-NEXT: and a0, a2, a0 -; RV32IFD-NEXT: and a1, a2, a1 +; RV32IFD-NEXT: slti a0, a0, 0 +; RV32IFD-NEXT: addi a3, a0, -1 +; RV32IFD-NEXT: and a0, a3, a1 +; RV32IFD-NEXT: and a1, a3, a2 ; RV32IFD-NEXT: lw ra, 28(sp) # 4-byte Folded Reload ; 
RV32IFD-NEXT: .cfi_restore ra ; RV32IFD-NEXT: addi sp, sp, 32 @@ -3530,30 +3530,30 @@ define i64 @ustest_f32i64_mm(float %x) { ; RV32-NEXT: .cfi_offset ra, -4 ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: call __fixsfti -; RV32-NEXT: lw a0, 8(sp) -; RV32-NEXT: lw a1, 12(sp) -; RV32-NEXT: lw a2, 20(sp) +; RV32-NEXT: lw a0, 20(sp) +; RV32-NEXT: lw a1, 8(sp) +; RV32-NEXT: lw a2, 12(sp) ; RV32-NEXT: lw a3, 16(sp) -; RV32-NEXT: beqz a2, .LBB50_2 +; RV32-NEXT: beqz a0, .LBB50_2 ; RV32-NEXT: # %bb.1: # %entry -; RV32-NEXT: slti a4, a2, 0 +; RV32-NEXT: slti a4, a0, 0 ; RV32-NEXT: j .LBB50_3 ; RV32-NEXT: .LBB50_2: ; RV32-NEXT: seqz a4, a3 ; RV32-NEXT: .LBB50_3: # %entry ; RV32-NEXT: xori a3, a3, 1 -; RV32-NEXT: or a3, a3, a2 +; RV32-NEXT: or a3, a3, a0 ; RV32-NEXT: seqz a3, a3 ; RV32-NEXT: addi a3, a3, -1 ; RV32-NEXT: and a3, a3, a4 ; RV32-NEXT: neg a3, a3 +; RV32-NEXT: and a2, a3, a2 ; RV32-NEXT: and a1, a3, a1 ; RV32-NEXT: and a0, a3, a0 -; RV32-NEXT: and a2, a3, a2 -; RV32-NEXT: slti a2, a2, 0 -; RV32-NEXT: addi a2, a2, -1 -; RV32-NEXT: and a0, a2, a0 -; RV32-NEXT: and a1, a2, a1 +; RV32-NEXT: slti a0, a0, 0 +; RV32-NEXT: addi a3, a0, -1 +; RV32-NEXT: and a0, a3, a1 +; RV32-NEXT: and a1, a3, a2 ; RV32-NEXT: lw ra, 28(sp) # 4-byte Folded Reload ; RV32-NEXT: .cfi_restore ra ; RV32-NEXT: addi sp, sp, 32 @@ -3767,30 +3767,30 @@ define i64 @ustest_f16i64_mm(half %x) { ; RV32-NEXT: call __extendhfsf2 ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: call __fixsfti -; RV32-NEXT: lw a0, 8(sp) -; RV32-NEXT: lw a1, 12(sp) -; RV32-NEXT: lw a2, 20(sp) +; RV32-NEXT: lw a0, 20(sp) +; RV32-NEXT: lw a1, 8(sp) +; RV32-NEXT: lw a2, 12(sp) ; RV32-NEXT: lw a3, 16(sp) -; RV32-NEXT: beqz a2, .LBB53_2 +; RV32-NEXT: beqz a0, .LBB53_2 ; RV32-NEXT: # %bb.1: # %entry -; RV32-NEXT: slti a4, a2, 0 +; RV32-NEXT: slti a4, a0, 0 ; RV32-NEXT: j .LBB53_3 ; RV32-NEXT: .LBB53_2: ; RV32-NEXT: seqz a4, a3 ; RV32-NEXT: .LBB53_3: # %entry ; RV32-NEXT: xori a3, a3, 1 -; RV32-NEXT: or a3, a3, a2 +; RV32-NEXT: or a3, a3, a0 ; RV32-NEXT: seqz a3, a3 ; RV32-NEXT: addi a3, a3, -1 ; RV32-NEXT: and a3, a3, a4 ; RV32-NEXT: neg a3, a3 +; RV32-NEXT: and a2, a3, a2 ; RV32-NEXT: and a1, a3, a1 ; RV32-NEXT: and a0, a3, a0 -; RV32-NEXT: and a2, a3, a2 -; RV32-NEXT: slti a2, a2, 0 -; RV32-NEXT: addi a2, a2, -1 -; RV32-NEXT: and a0, a2, a0 -; RV32-NEXT: and a1, a2, a1 +; RV32-NEXT: slti a0, a0, 0 +; RV32-NEXT: addi a3, a0, -1 +; RV32-NEXT: and a0, a3, a1 +; RV32-NEXT: and a1, a3, a2 ; RV32-NEXT: lw ra, 28(sp) # 4-byte Folded Reload ; RV32-NEXT: .cfi_restore ra ; RV32-NEXT: addi sp, sp, 32 diff --git a/llvm/test/CodeGen/RISCV/interrupt-attr.ll b/llvm/test/CodeGen/RISCV/interrupt-attr.ll index e278b8d..472b903 100644 --- a/llvm/test/CodeGen/RISCV/interrupt-attr.ll +++ b/llvm/test/CodeGen/RISCV/interrupt-attr.ll @@ -794,498 +794,46 @@ define void @foo_with_call() #1 { ; CHECK-RV32-V-NEXT: slli a0, a0, 5 ; CHECK-RV32-V-NEXT: sub sp, sp, a0 ; CHECK-RV32-V-NEXT: csrr a0, vlenb -; CHECK-RV32-V-NEXT: slli a1, a0, 5 -; CHECK-RV32-V-NEXT: sub a0, a1, a0 -; CHECK-RV32-V-NEXT: add a0, sp, a0 -; CHECK-RV32-V-NEXT: addi a0, a0, 16 -; CHECK-RV32-V-NEXT: vs1r.v v0, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-V-NEXT: csrr a0, vlenb -; CHECK-RV32-V-NEXT: slli a0, a0, 1 -; CHECK-RV32-V-NEXT: mv a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 1 -; CHECK-RV32-V-NEXT: add a1, a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 1 -; CHECK-RV32-V-NEXT: add a1, a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 1 -; CHECK-RV32-V-NEXT: add a0, a0, a1 -; CHECK-RV32-V-NEXT: add a0, sp, a0 -; CHECK-RV32-V-NEXT: 
addi a0, a0, 16 -; CHECK-RV32-V-NEXT: vs1r.v v1, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-V-NEXT: csrr a0, vlenb -; CHECK-RV32-V-NEXT: mv a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 2 -; CHECK-RV32-V-NEXT: add a1, a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 1 -; CHECK-RV32-V-NEXT: add a1, a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 1 -; CHECK-RV32-V-NEXT: add a0, a0, a1 -; CHECK-RV32-V-NEXT: add a0, sp, a0 -; CHECK-RV32-V-NEXT: addi a0, a0, 16 -; CHECK-RV32-V-NEXT: vs1r.v v2, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-V-NEXT: csrr a0, vlenb -; CHECK-RV32-V-NEXT: slli a0, a0, 2 -; CHECK-RV32-V-NEXT: mv a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 1 -; CHECK-RV32-V-NEXT: add a1, a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 1 -; CHECK-RV32-V-NEXT: add a0, a0, a1 -; CHECK-RV32-V-NEXT: add a0, sp, a0 -; CHECK-RV32-V-NEXT: addi a0, a0, 16 -; CHECK-RV32-V-NEXT: vs1r.v v3, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-V-NEXT: csrr a0, vlenb -; CHECK-RV32-V-NEXT: mv a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 1 -; CHECK-RV32-V-NEXT: add a1, a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 2 -; CHECK-RV32-V-NEXT: add a1, a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 1 -; CHECK-RV32-V-NEXT: add a0, a0, a1 -; CHECK-RV32-V-NEXT: add a0, sp, a0 -; CHECK-RV32-V-NEXT: addi a0, a0, 16 -; CHECK-RV32-V-NEXT: vs1r.v v4, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-V-NEXT: csrr a0, vlenb -; CHECK-RV32-V-NEXT: slli a0, a0, 1 -; CHECK-RV32-V-NEXT: mv a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 2 -; CHECK-RV32-V-NEXT: add a1, a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 1 -; CHECK-RV32-V-NEXT: add a0, a0, a1 -; CHECK-RV32-V-NEXT: add a0, sp, a0 -; CHECK-RV32-V-NEXT: addi a0, a0, 16 -; CHECK-RV32-V-NEXT: vs1r.v v5, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-V-NEXT: csrr a0, vlenb -; CHECK-RV32-V-NEXT: mv a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 3 -; CHECK-RV32-V-NEXT: add a1, a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 1 -; CHECK-RV32-V-NEXT: add a0, a0, a1 -; CHECK-RV32-V-NEXT: add a0, sp, a0 -; CHECK-RV32-V-NEXT: addi a0, a0, 16 -; CHECK-RV32-V-NEXT: vs1r.v v6, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-V-NEXT: csrr a0, vlenb ; CHECK-RV32-V-NEXT: slli a0, a0, 3 ; CHECK-RV32-V-NEXT: mv a1, a0 ; CHECK-RV32-V-NEXT: slli a0, a0, 1 ; CHECK-RV32-V-NEXT: add a0, a0, a1 ; CHECK-RV32-V-NEXT: add a0, sp, a0 ; CHECK-RV32-V-NEXT: addi a0, a0, 16 -; CHECK-RV32-V-NEXT: vs1r.v v7, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-V-NEXT: csrr a0, vlenb -; CHECK-RV32-V-NEXT: mv a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 1 -; CHECK-RV32-V-NEXT: add a1, a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 1 -; CHECK-RV32-V-NEXT: add a1, a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 2 -; CHECK-RV32-V-NEXT: add a0, a0, a1 -; CHECK-RV32-V-NEXT: add a0, sp, a0 -; CHECK-RV32-V-NEXT: addi a0, a0, 16 -; CHECK-RV32-V-NEXT: vs1r.v v8, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-V-NEXT: csrr a0, vlenb -; CHECK-RV32-V-NEXT: slli a0, a0, 1 -; CHECK-RV32-V-NEXT: mv a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 1 -; CHECK-RV32-V-NEXT: add a1, a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 2 -; CHECK-RV32-V-NEXT: add a0, a0, a1 -; CHECK-RV32-V-NEXT: add a0, sp, a0 -; CHECK-RV32-V-NEXT: addi a0, a0, 16 -; CHECK-RV32-V-NEXT: vs1r.v v9, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-V-NEXT: csrr a0, vlenb -; CHECK-RV32-V-NEXT: mv a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 2 -; CHECK-RV32-V-NEXT: add a1, a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 2 -; CHECK-RV32-V-NEXT: add a0, a0, a1 -; CHECK-RV32-V-NEXT: add a0, sp, a0 -; CHECK-RV32-V-NEXT: addi a0, a0, 16 
-; CHECK-RV32-V-NEXT: vs1r.v v10, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-V-NEXT: csrr a0, vlenb -; CHECK-RV32-V-NEXT: slli a0, a0, 2 -; CHECK-RV32-V-NEXT: mv a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 2 -; CHECK-RV32-V-NEXT: add a0, a0, a1 -; CHECK-RV32-V-NEXT: add a0, sp, a0 -; CHECK-RV32-V-NEXT: addi a0, a0, 16 -; CHECK-RV32-V-NEXT: vs1r.v v11, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-V-NEXT: csrr a0, vlenb -; CHECK-RV32-V-NEXT: mv a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 1 -; CHECK-RV32-V-NEXT: add a1, a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 3 -; CHECK-RV32-V-NEXT: add a0, a0, a1 -; CHECK-RV32-V-NEXT: add a0, sp, a0 -; CHECK-RV32-V-NEXT: addi a0, a0, 16 -; CHECK-RV32-V-NEXT: vs1r.v v12, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-V-NEXT: csrr a0, vlenb -; CHECK-RV32-V-NEXT: slli a0, a0, 1 -; CHECK-RV32-V-NEXT: mv a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 3 -; CHECK-RV32-V-NEXT: add a0, a0, a1 -; CHECK-RV32-V-NEXT: add a0, sp, a0 -; CHECK-RV32-V-NEXT: addi a0, a0, 16 -; CHECK-RV32-V-NEXT: vs1r.v v13, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-V-NEXT: csrr a0, vlenb -; CHECK-RV32-V-NEXT: slli a1, a0, 4 -; CHECK-RV32-V-NEXT: add a0, a1, a0 -; CHECK-RV32-V-NEXT: add a0, sp, a0 -; CHECK-RV32-V-NEXT: addi a0, a0, 16 -; CHECK-RV32-V-NEXT: vs1r.v v14, (a0) # vscale x 8-byte Folded Spill +; CHECK-RV32-V-NEXT: vs8r.v v0, (a0) # vscale x 64-byte Folded Spill ; CHECK-RV32-V-NEXT: csrr a0, vlenb ; CHECK-RV32-V-NEXT: slli a0, a0, 4 ; CHECK-RV32-V-NEXT: add a0, sp, a0 ; CHECK-RV32-V-NEXT: addi a0, a0, 16 -; CHECK-RV32-V-NEXT: vs1r.v v15, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-V-NEXT: csrr a0, vlenb -; CHECK-RV32-V-NEXT: slli a1, a0, 4 -; CHECK-RV32-V-NEXT: sub a0, a1, a0 -; CHECK-RV32-V-NEXT: add a0, sp, a0 -; CHECK-RV32-V-NEXT: addi a0, a0, 16 -; CHECK-RV32-V-NEXT: vs1r.v v16, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-V-NEXT: csrr a0, vlenb -; CHECK-RV32-V-NEXT: slli a0, a0, 1 -; CHECK-RV32-V-NEXT: mv a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 1 -; CHECK-RV32-V-NEXT: add a1, a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 1 -; CHECK-RV32-V-NEXT: add a0, a0, a1 -; CHECK-RV32-V-NEXT: add a0, sp, a0 -; CHECK-RV32-V-NEXT: addi a0, a0, 16 -; CHECK-RV32-V-NEXT: vs1r.v v17, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-V-NEXT: csrr a0, vlenb -; CHECK-RV32-V-NEXT: mv a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 2 -; CHECK-RV32-V-NEXT: add a1, a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 1 -; CHECK-RV32-V-NEXT: add a0, a0, a1 -; CHECK-RV32-V-NEXT: add a0, sp, a0 -; CHECK-RV32-V-NEXT: addi a0, a0, 16 -; CHECK-RV32-V-NEXT: vs1r.v v18, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-V-NEXT: csrr a0, vlenb -; CHECK-RV32-V-NEXT: slli a0, a0, 2 -; CHECK-RV32-V-NEXT: mv a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 1 -; CHECK-RV32-V-NEXT: add a0, a0, a1 -; CHECK-RV32-V-NEXT: add a0, sp, a0 -; CHECK-RV32-V-NEXT: addi a0, a0, 16 -; CHECK-RV32-V-NEXT: vs1r.v v19, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-V-NEXT: csrr a0, vlenb -; CHECK-RV32-V-NEXT: mv a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 1 -; CHECK-RV32-V-NEXT: add a1, a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 2 -; CHECK-RV32-V-NEXT: add a0, a0, a1 -; CHECK-RV32-V-NEXT: add a0, sp, a0 -; CHECK-RV32-V-NEXT: addi a0, a0, 16 -; CHECK-RV32-V-NEXT: vs1r.v v20, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-V-NEXT: csrr a0, vlenb -; CHECK-RV32-V-NEXT: slli a0, a0, 1 -; CHECK-RV32-V-NEXT: mv a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 2 -; CHECK-RV32-V-NEXT: add a0, a0, a1 -; CHECK-RV32-V-NEXT: add a0, sp, a0 -; 
CHECK-RV32-V-NEXT: addi a0, a0, 16 -; CHECK-RV32-V-NEXT: vs1r.v v21, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-V-NEXT: csrr a0, vlenb -; CHECK-RV32-V-NEXT: slli a1, a0, 3 -; CHECK-RV32-V-NEXT: add a0, a1, a0 -; CHECK-RV32-V-NEXT: add a0, sp, a0 -; CHECK-RV32-V-NEXT: addi a0, a0, 16 -; CHECK-RV32-V-NEXT: vs1r.v v22, (a0) # vscale x 8-byte Folded Spill +; CHECK-RV32-V-NEXT: vs8r.v v8, (a0) # vscale x 64-byte Folded Spill ; CHECK-RV32-V-NEXT: csrr a0, vlenb ; CHECK-RV32-V-NEXT: slli a0, a0, 3 ; CHECK-RV32-V-NEXT: add a0, sp, a0 ; CHECK-RV32-V-NEXT: addi a0, a0, 16 -; CHECK-RV32-V-NEXT: vs1r.v v23, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-V-NEXT: csrr a0, vlenb -; CHECK-RV32-V-NEXT: slli a1, a0, 3 -; CHECK-RV32-V-NEXT: sub a0, a1, a0 -; CHECK-RV32-V-NEXT: add a0, sp, a0 -; CHECK-RV32-V-NEXT: addi a0, a0, 16 -; CHECK-RV32-V-NEXT: vs1r.v v24, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-V-NEXT: csrr a0, vlenb -; CHECK-RV32-V-NEXT: slli a0, a0, 1 -; CHECK-RV32-V-NEXT: mv a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 1 -; CHECK-RV32-V-NEXT: add a0, a0, a1 -; CHECK-RV32-V-NEXT: add a0, sp, a0 -; CHECK-RV32-V-NEXT: addi a0, a0, 16 -; CHECK-RV32-V-NEXT: vs1r.v v25, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-V-NEXT: csrr a0, vlenb -; CHECK-RV32-V-NEXT: slli a1, a0, 2 -; CHECK-RV32-V-NEXT: add a0, a1, a0 -; CHECK-RV32-V-NEXT: add a0, sp, a0 -; CHECK-RV32-V-NEXT: addi a0, a0, 16 -; CHECK-RV32-V-NEXT: vs1r.v v26, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-V-NEXT: csrr a0, vlenb -; CHECK-RV32-V-NEXT: slli a0, a0, 2 -; CHECK-RV32-V-NEXT: add a0, sp, a0 -; CHECK-RV32-V-NEXT: addi a0, a0, 16 -; CHECK-RV32-V-NEXT: vs1r.v v27, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-V-NEXT: csrr a0, vlenb -; CHECK-RV32-V-NEXT: slli a1, a0, 1 -; CHECK-RV32-V-NEXT: add a0, a1, a0 -; CHECK-RV32-V-NEXT: add a0, sp, a0 -; CHECK-RV32-V-NEXT: addi a0, a0, 16 -; CHECK-RV32-V-NEXT: vs1r.v v28, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-V-NEXT: csrr a0, vlenb -; CHECK-RV32-V-NEXT: slli a0, a0, 1 -; CHECK-RV32-V-NEXT: add a0, sp, a0 -; CHECK-RV32-V-NEXT: addi a0, a0, 16 -; CHECK-RV32-V-NEXT: vs1r.v v29, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-V-NEXT: csrr a0, vlenb -; CHECK-RV32-V-NEXT: add a0, sp, a0 -; CHECK-RV32-V-NEXT: addi a0, a0, 16 -; CHECK-RV32-V-NEXT: vs1r.v v30, (a0) # vscale x 8-byte Folded Spill +; CHECK-RV32-V-NEXT: vs8r.v v16, (a0) # vscale x 64-byte Folded Spill ; CHECK-RV32-V-NEXT: addi a0, sp, 16 -; CHECK-RV32-V-NEXT: vs1r.v v31, (a0) # vscale x 8-byte Folded Spill +; CHECK-RV32-V-NEXT: vs8r.v v24, (a0) # vscale x 64-byte Folded Spill ; CHECK-RV32-V-NEXT: call otherfoo ; CHECK-RV32-V-NEXT: csrr a0, vlenb -; CHECK-RV32-V-NEXT: slli a1, a0, 5 -; CHECK-RV32-V-NEXT: sub a0, a1, a0 -; CHECK-RV32-V-NEXT: add a0, sp, a0 -; CHECK-RV32-V-NEXT: addi a0, a0, 16 -; CHECK-RV32-V-NEXT: vl1r.v v0, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-V-NEXT: csrr a0, vlenb -; CHECK-RV32-V-NEXT: slli a0, a0, 1 -; CHECK-RV32-V-NEXT: mv a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 1 -; CHECK-RV32-V-NEXT: add a1, a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 1 -; CHECK-RV32-V-NEXT: add a1, a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 1 -; CHECK-RV32-V-NEXT: add a0, a0, a1 -; CHECK-RV32-V-NEXT: add a0, sp, a0 -; CHECK-RV32-V-NEXT: addi a0, a0, 16 -; CHECK-RV32-V-NEXT: vl1r.v v1, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-V-NEXT: csrr a0, vlenb -; CHECK-RV32-V-NEXT: mv a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 2 -; CHECK-RV32-V-NEXT: add a1, a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 1 -; 
CHECK-RV32-V-NEXT: add a1, a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 1 -; CHECK-RV32-V-NEXT: add a0, a0, a1 -; CHECK-RV32-V-NEXT: add a0, sp, a0 -; CHECK-RV32-V-NEXT: addi a0, a0, 16 -; CHECK-RV32-V-NEXT: vl1r.v v2, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-V-NEXT: csrr a0, vlenb -; CHECK-RV32-V-NEXT: slli a0, a0, 2 -; CHECK-RV32-V-NEXT: mv a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 1 -; CHECK-RV32-V-NEXT: add a1, a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 1 -; CHECK-RV32-V-NEXT: add a0, a0, a1 -; CHECK-RV32-V-NEXT: add a0, sp, a0 -; CHECK-RV32-V-NEXT: addi a0, a0, 16 -; CHECK-RV32-V-NEXT: vl1r.v v3, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-V-NEXT: csrr a0, vlenb -; CHECK-RV32-V-NEXT: mv a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 1 -; CHECK-RV32-V-NEXT: add a1, a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 2 -; CHECK-RV32-V-NEXT: add a1, a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 1 -; CHECK-RV32-V-NEXT: add a0, a0, a1 -; CHECK-RV32-V-NEXT: add a0, sp, a0 -; CHECK-RV32-V-NEXT: addi a0, a0, 16 -; CHECK-RV32-V-NEXT: vl1r.v v4, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-V-NEXT: csrr a0, vlenb -; CHECK-RV32-V-NEXT: slli a0, a0, 1 -; CHECK-RV32-V-NEXT: mv a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 2 -; CHECK-RV32-V-NEXT: add a1, a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 1 -; CHECK-RV32-V-NEXT: add a0, a0, a1 -; CHECK-RV32-V-NEXT: add a0, sp, a0 -; CHECK-RV32-V-NEXT: addi a0, a0, 16 -; CHECK-RV32-V-NEXT: vl1r.v v5, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-V-NEXT: csrr a0, vlenb -; CHECK-RV32-V-NEXT: mv a1, a0 ; CHECK-RV32-V-NEXT: slli a0, a0, 3 -; CHECK-RV32-V-NEXT: add a1, a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 1 -; CHECK-RV32-V-NEXT: add a0, a0, a1 -; CHECK-RV32-V-NEXT: add a0, sp, a0 -; CHECK-RV32-V-NEXT: addi a0, a0, 16 -; CHECK-RV32-V-NEXT: vl1r.v v6, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-V-NEXT: csrr a0, vlenb -; CHECK-RV32-V-NEXT: slli a0, a0, 3 -; CHECK-RV32-V-NEXT: mv a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 1 -; CHECK-RV32-V-NEXT: add a0, a0, a1 -; CHECK-RV32-V-NEXT: add a0, sp, a0 -; CHECK-RV32-V-NEXT: addi a0, a0, 16 -; CHECK-RV32-V-NEXT: vl1r.v v7, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-V-NEXT: csrr a0, vlenb -; CHECK-RV32-V-NEXT: mv a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 1 -; CHECK-RV32-V-NEXT: add a1, a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 1 -; CHECK-RV32-V-NEXT: add a1, a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 2 -; CHECK-RV32-V-NEXT: add a0, a0, a1 -; CHECK-RV32-V-NEXT: add a0, sp, a0 -; CHECK-RV32-V-NEXT: addi a0, a0, 16 -; CHECK-RV32-V-NEXT: vl1r.v v8, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-V-NEXT: csrr a0, vlenb -; CHECK-RV32-V-NEXT: slli a0, a0, 1 -; CHECK-RV32-V-NEXT: mv a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 1 -; CHECK-RV32-V-NEXT: add a1, a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 2 -; CHECK-RV32-V-NEXT: add a0, a0, a1 -; CHECK-RV32-V-NEXT: add a0, sp, a0 -; CHECK-RV32-V-NEXT: addi a0, a0, 16 -; CHECK-RV32-V-NEXT: vl1r.v v9, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-V-NEXT: csrr a0, vlenb -; CHECK-RV32-V-NEXT: mv a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 2 -; CHECK-RV32-V-NEXT: add a1, a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 2 -; CHECK-RV32-V-NEXT: add a0, a0, a1 -; CHECK-RV32-V-NEXT: add a0, sp, a0 -; CHECK-RV32-V-NEXT: addi a0, a0, 16 -; CHECK-RV32-V-NEXT: vl1r.v v10, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-V-NEXT: csrr a0, vlenb -; CHECK-RV32-V-NEXT: slli a0, a0, 2 -; CHECK-RV32-V-NEXT: mv a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 2 -; CHECK-RV32-V-NEXT: add a0, a0, a1 -; 
CHECK-RV32-V-NEXT: add a0, sp, a0 -; CHECK-RV32-V-NEXT: addi a0, a0, 16 -; CHECK-RV32-V-NEXT: vl1r.v v11, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-V-NEXT: csrr a0, vlenb ; CHECK-RV32-V-NEXT: mv a1, a0 ; CHECK-RV32-V-NEXT: slli a0, a0, 1 -; CHECK-RV32-V-NEXT: add a1, a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 3 ; CHECK-RV32-V-NEXT: add a0, a0, a1 ; CHECK-RV32-V-NEXT: add a0, sp, a0 ; CHECK-RV32-V-NEXT: addi a0, a0, 16 -; CHECK-RV32-V-NEXT: vl1r.v v12, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-V-NEXT: csrr a0, vlenb -; CHECK-RV32-V-NEXT: slli a0, a0, 1 -; CHECK-RV32-V-NEXT: mv a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 3 -; CHECK-RV32-V-NEXT: add a0, a0, a1 -; CHECK-RV32-V-NEXT: add a0, sp, a0 -; CHECK-RV32-V-NEXT: addi a0, a0, 16 -; CHECK-RV32-V-NEXT: vl1r.v v13, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-V-NEXT: csrr a0, vlenb -; CHECK-RV32-V-NEXT: slli a1, a0, 4 -; CHECK-RV32-V-NEXT: add a0, a1, a0 -; CHECK-RV32-V-NEXT: add a0, sp, a0 -; CHECK-RV32-V-NEXT: addi a0, a0, 16 -; CHECK-RV32-V-NEXT: vl1r.v v14, (a0) # vscale x 8-byte Folded Reload +; CHECK-RV32-V-NEXT: vl8r.v v0, (a0) # vscale x 64-byte Folded Reload ; CHECK-RV32-V-NEXT: csrr a0, vlenb ; CHECK-RV32-V-NEXT: slli a0, a0, 4 ; CHECK-RV32-V-NEXT: add a0, sp, a0 ; CHECK-RV32-V-NEXT: addi a0, a0, 16 -; CHECK-RV32-V-NEXT: vl1r.v v15, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-V-NEXT: csrr a0, vlenb -; CHECK-RV32-V-NEXT: slli a1, a0, 4 -; CHECK-RV32-V-NEXT: sub a0, a1, a0 -; CHECK-RV32-V-NEXT: add a0, sp, a0 -; CHECK-RV32-V-NEXT: addi a0, a0, 16 -; CHECK-RV32-V-NEXT: vl1r.v v16, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-V-NEXT: csrr a0, vlenb -; CHECK-RV32-V-NEXT: slli a0, a0, 1 -; CHECK-RV32-V-NEXT: mv a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 1 -; CHECK-RV32-V-NEXT: add a1, a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 1 -; CHECK-RV32-V-NEXT: add a0, a0, a1 -; CHECK-RV32-V-NEXT: add a0, sp, a0 -; CHECK-RV32-V-NEXT: addi a0, a0, 16 -; CHECK-RV32-V-NEXT: vl1r.v v17, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-V-NEXT: csrr a0, vlenb -; CHECK-RV32-V-NEXT: mv a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 2 -; CHECK-RV32-V-NEXT: add a1, a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 1 -; CHECK-RV32-V-NEXT: add a0, a0, a1 -; CHECK-RV32-V-NEXT: add a0, sp, a0 -; CHECK-RV32-V-NEXT: addi a0, a0, 16 -; CHECK-RV32-V-NEXT: vl1r.v v18, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-V-NEXT: csrr a0, vlenb -; CHECK-RV32-V-NEXT: slli a0, a0, 2 -; CHECK-RV32-V-NEXT: mv a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 1 -; CHECK-RV32-V-NEXT: add a0, a0, a1 -; CHECK-RV32-V-NEXT: add a0, sp, a0 -; CHECK-RV32-V-NEXT: addi a0, a0, 16 -; CHECK-RV32-V-NEXT: vl1r.v v19, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-V-NEXT: csrr a0, vlenb -; CHECK-RV32-V-NEXT: mv a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 1 -; CHECK-RV32-V-NEXT: add a1, a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 2 -; CHECK-RV32-V-NEXT: add a0, a0, a1 -; CHECK-RV32-V-NEXT: add a0, sp, a0 -; CHECK-RV32-V-NEXT: addi a0, a0, 16 -; CHECK-RV32-V-NEXT: vl1r.v v20, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-V-NEXT: csrr a0, vlenb -; CHECK-RV32-V-NEXT: slli a0, a0, 1 -; CHECK-RV32-V-NEXT: mv a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 2 -; CHECK-RV32-V-NEXT: add a0, a0, a1 -; CHECK-RV32-V-NEXT: add a0, sp, a0 -; CHECK-RV32-V-NEXT: addi a0, a0, 16 -; CHECK-RV32-V-NEXT: vl1r.v v21, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-V-NEXT: csrr a0, vlenb -; CHECK-RV32-V-NEXT: slli a1, a0, 3 -; CHECK-RV32-V-NEXT: add a0, a1, a0 -; CHECK-RV32-V-NEXT: add a0, sp, a0 
-; CHECK-RV32-V-NEXT: addi a0, a0, 16 -; CHECK-RV32-V-NEXT: vl1r.v v22, (a0) # vscale x 8-byte Folded Reload +; CHECK-RV32-V-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload ; CHECK-RV32-V-NEXT: csrr a0, vlenb ; CHECK-RV32-V-NEXT: slli a0, a0, 3 ; CHECK-RV32-V-NEXT: add a0, sp, a0 ; CHECK-RV32-V-NEXT: addi a0, a0, 16 -; CHECK-RV32-V-NEXT: vl1r.v v23, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-V-NEXT: csrr a0, vlenb -; CHECK-RV32-V-NEXT: slli a1, a0, 3 -; CHECK-RV32-V-NEXT: sub a0, a1, a0 -; CHECK-RV32-V-NEXT: add a0, sp, a0 -; CHECK-RV32-V-NEXT: addi a0, a0, 16 -; CHECK-RV32-V-NEXT: vl1r.v v24, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-V-NEXT: csrr a0, vlenb -; CHECK-RV32-V-NEXT: slli a0, a0, 1 -; CHECK-RV32-V-NEXT: mv a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 1 -; CHECK-RV32-V-NEXT: add a0, a0, a1 -; CHECK-RV32-V-NEXT: add a0, sp, a0 -; CHECK-RV32-V-NEXT: addi a0, a0, 16 -; CHECK-RV32-V-NEXT: vl1r.v v25, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-V-NEXT: csrr a0, vlenb -; CHECK-RV32-V-NEXT: slli a1, a0, 2 -; CHECK-RV32-V-NEXT: add a0, a1, a0 -; CHECK-RV32-V-NEXT: add a0, sp, a0 -; CHECK-RV32-V-NEXT: addi a0, a0, 16 -; CHECK-RV32-V-NEXT: vl1r.v v26, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-V-NEXT: csrr a0, vlenb -; CHECK-RV32-V-NEXT: slli a0, a0, 2 -; CHECK-RV32-V-NEXT: add a0, sp, a0 -; CHECK-RV32-V-NEXT: addi a0, a0, 16 -; CHECK-RV32-V-NEXT: vl1r.v v27, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-V-NEXT: csrr a0, vlenb -; CHECK-RV32-V-NEXT: slli a1, a0, 1 -; CHECK-RV32-V-NEXT: add a0, a1, a0 -; CHECK-RV32-V-NEXT: add a0, sp, a0 -; CHECK-RV32-V-NEXT: addi a0, a0, 16 -; CHECK-RV32-V-NEXT: vl1r.v v28, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-V-NEXT: csrr a0, vlenb -; CHECK-RV32-V-NEXT: slli a0, a0, 1 -; CHECK-RV32-V-NEXT: add a0, sp, a0 -; CHECK-RV32-V-NEXT: addi a0, a0, 16 -; CHECK-RV32-V-NEXT: vl1r.v v29, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-V-NEXT: csrr a0, vlenb -; CHECK-RV32-V-NEXT: add a0, sp, a0 -; CHECK-RV32-V-NEXT: addi a0, a0, 16 -; CHECK-RV32-V-NEXT: vl1r.v v30, (a0) # vscale x 8-byte Folded Reload +; CHECK-RV32-V-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload ; CHECK-RV32-V-NEXT: addi a0, sp, 16 -; CHECK-RV32-V-NEXT: vl1r.v v31, (a0) # vscale x 8-byte Folded Reload +; CHECK-RV32-V-NEXT: vl8r.v v24, (a0) # vscale x 64-byte Folded Reload ; CHECK-RV32-V-NEXT: csrr a0, vlenb ; CHECK-RV32-V-NEXT: slli a0, a0, 5 ; CHECK-RV32-V-NEXT: add sp, sp, a0 @@ -1351,498 +899,46 @@ define void @foo_with_call() #1 { ; CHECK-RV32-FV-NEXT: slli a0, a0, 5 ; CHECK-RV32-FV-NEXT: sub sp, sp, a0 ; CHECK-RV32-FV-NEXT: csrr a0, vlenb -; CHECK-RV32-FV-NEXT: slli a1, a0, 5 -; CHECK-RV32-FV-NEXT: sub a0, a1, a0 -; CHECK-RV32-FV-NEXT: add a0, sp, a0 -; CHECK-RV32-FV-NEXT: addi a0, a0, 16 -; CHECK-RV32-FV-NEXT: vs1r.v v0, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-FV-NEXT: csrr a0, vlenb -; CHECK-RV32-FV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FV-NEXT: mv a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FV-NEXT: add a1, a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FV-NEXT: add a1, a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FV-NEXT: add a0, a0, a1 -; CHECK-RV32-FV-NEXT: add a0, sp, a0 -; CHECK-RV32-FV-NEXT: addi a0, a0, 16 -; CHECK-RV32-FV-NEXT: vs1r.v v1, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-FV-NEXT: csrr a0, vlenb -; CHECK-RV32-FV-NEXT: mv a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 2 -; CHECK-RV32-FV-NEXT: add a1, a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 1 -; 
CHECK-RV32-FV-NEXT: add a1, a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FV-NEXT: add a0, a0, a1 -; CHECK-RV32-FV-NEXT: add a0, sp, a0 -; CHECK-RV32-FV-NEXT: addi a0, a0, 16 -; CHECK-RV32-FV-NEXT: vs1r.v v2, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-FV-NEXT: csrr a0, vlenb -; CHECK-RV32-FV-NEXT: slli a0, a0, 2 -; CHECK-RV32-FV-NEXT: mv a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FV-NEXT: add a1, a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FV-NEXT: add a0, a0, a1 -; CHECK-RV32-FV-NEXT: add a0, sp, a0 -; CHECK-RV32-FV-NEXT: addi a0, a0, 16 -; CHECK-RV32-FV-NEXT: vs1r.v v3, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-FV-NEXT: csrr a0, vlenb -; CHECK-RV32-FV-NEXT: mv a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FV-NEXT: add a1, a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 2 -; CHECK-RV32-FV-NEXT: add a1, a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FV-NEXT: add a0, a0, a1 -; CHECK-RV32-FV-NEXT: add a0, sp, a0 -; CHECK-RV32-FV-NEXT: addi a0, a0, 16 -; CHECK-RV32-FV-NEXT: vs1r.v v4, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-FV-NEXT: csrr a0, vlenb -; CHECK-RV32-FV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FV-NEXT: mv a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 2 -; CHECK-RV32-FV-NEXT: add a1, a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FV-NEXT: add a0, a0, a1 -; CHECK-RV32-FV-NEXT: add a0, sp, a0 -; CHECK-RV32-FV-NEXT: addi a0, a0, 16 -; CHECK-RV32-FV-NEXT: vs1r.v v5, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-FV-NEXT: csrr a0, vlenb -; CHECK-RV32-FV-NEXT: mv a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 3 -; CHECK-RV32-FV-NEXT: add a1, a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FV-NEXT: add a0, a0, a1 -; CHECK-RV32-FV-NEXT: add a0, sp, a0 -; CHECK-RV32-FV-NEXT: addi a0, a0, 16 -; CHECK-RV32-FV-NEXT: vs1r.v v6, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-FV-NEXT: csrr a0, vlenb ; CHECK-RV32-FV-NEXT: slli a0, a0, 3 ; CHECK-RV32-FV-NEXT: mv a1, a0 ; CHECK-RV32-FV-NEXT: slli a0, a0, 1 ; CHECK-RV32-FV-NEXT: add a0, a0, a1 ; CHECK-RV32-FV-NEXT: add a0, sp, a0 ; CHECK-RV32-FV-NEXT: addi a0, a0, 16 -; CHECK-RV32-FV-NEXT: vs1r.v v7, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-FV-NEXT: csrr a0, vlenb -; CHECK-RV32-FV-NEXT: mv a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FV-NEXT: add a1, a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FV-NEXT: add a1, a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 2 -; CHECK-RV32-FV-NEXT: add a0, a0, a1 -; CHECK-RV32-FV-NEXT: add a0, sp, a0 -; CHECK-RV32-FV-NEXT: addi a0, a0, 16 -; CHECK-RV32-FV-NEXT: vs1r.v v8, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-FV-NEXT: csrr a0, vlenb -; CHECK-RV32-FV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FV-NEXT: mv a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FV-NEXT: add a1, a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 2 -; CHECK-RV32-FV-NEXT: add a0, a0, a1 -; CHECK-RV32-FV-NEXT: add a0, sp, a0 -; CHECK-RV32-FV-NEXT: addi a0, a0, 16 -; CHECK-RV32-FV-NEXT: vs1r.v v9, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-FV-NEXT: csrr a0, vlenb -; CHECK-RV32-FV-NEXT: mv a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 2 -; CHECK-RV32-FV-NEXT: add a1, a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 2 -; CHECK-RV32-FV-NEXT: add a0, a0, a1 -; CHECK-RV32-FV-NEXT: add a0, sp, a0 -; CHECK-RV32-FV-NEXT: addi a0, a0, 16 -; CHECK-RV32-FV-NEXT: vs1r.v v10, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-FV-NEXT: csrr a0, vlenb -; CHECK-RV32-FV-NEXT: slli a0, a0, 2 -; CHECK-RV32-FV-NEXT: mv a1, a0 -; 
CHECK-RV32-FV-NEXT: slli a0, a0, 2 -; CHECK-RV32-FV-NEXT: add a0, a0, a1 -; CHECK-RV32-FV-NEXT: add a0, sp, a0 -; CHECK-RV32-FV-NEXT: addi a0, a0, 16 -; CHECK-RV32-FV-NEXT: vs1r.v v11, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-FV-NEXT: csrr a0, vlenb -; CHECK-RV32-FV-NEXT: mv a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FV-NEXT: add a1, a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 3 -; CHECK-RV32-FV-NEXT: add a0, a0, a1 -; CHECK-RV32-FV-NEXT: add a0, sp, a0 -; CHECK-RV32-FV-NEXT: addi a0, a0, 16 -; CHECK-RV32-FV-NEXT: vs1r.v v12, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-FV-NEXT: csrr a0, vlenb -; CHECK-RV32-FV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FV-NEXT: mv a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 3 -; CHECK-RV32-FV-NEXT: add a0, a0, a1 -; CHECK-RV32-FV-NEXT: add a0, sp, a0 -; CHECK-RV32-FV-NEXT: addi a0, a0, 16 -; CHECK-RV32-FV-NEXT: vs1r.v v13, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-FV-NEXT: csrr a0, vlenb -; CHECK-RV32-FV-NEXT: slli a1, a0, 4 -; CHECK-RV32-FV-NEXT: add a0, a1, a0 -; CHECK-RV32-FV-NEXT: add a0, sp, a0 -; CHECK-RV32-FV-NEXT: addi a0, a0, 16 -; CHECK-RV32-FV-NEXT: vs1r.v v14, (a0) # vscale x 8-byte Folded Spill +; CHECK-RV32-FV-NEXT: vs8r.v v0, (a0) # vscale x 64-byte Folded Spill ; CHECK-RV32-FV-NEXT: csrr a0, vlenb ; CHECK-RV32-FV-NEXT: slli a0, a0, 4 ; CHECK-RV32-FV-NEXT: add a0, sp, a0 ; CHECK-RV32-FV-NEXT: addi a0, a0, 16 -; CHECK-RV32-FV-NEXT: vs1r.v v15, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-FV-NEXT: csrr a0, vlenb -; CHECK-RV32-FV-NEXT: slli a1, a0, 4 -; CHECK-RV32-FV-NEXT: sub a0, a1, a0 -; CHECK-RV32-FV-NEXT: add a0, sp, a0 -; CHECK-RV32-FV-NEXT: addi a0, a0, 16 -; CHECK-RV32-FV-NEXT: vs1r.v v16, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-FV-NEXT: csrr a0, vlenb -; CHECK-RV32-FV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FV-NEXT: mv a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FV-NEXT: add a1, a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FV-NEXT: add a0, a0, a1 -; CHECK-RV32-FV-NEXT: add a0, sp, a0 -; CHECK-RV32-FV-NEXT: addi a0, a0, 16 -; CHECK-RV32-FV-NEXT: vs1r.v v17, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-FV-NEXT: csrr a0, vlenb -; CHECK-RV32-FV-NEXT: mv a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 2 -; CHECK-RV32-FV-NEXT: add a1, a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FV-NEXT: add a0, a0, a1 -; CHECK-RV32-FV-NEXT: add a0, sp, a0 -; CHECK-RV32-FV-NEXT: addi a0, a0, 16 -; CHECK-RV32-FV-NEXT: vs1r.v v18, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-FV-NEXT: csrr a0, vlenb -; CHECK-RV32-FV-NEXT: slli a0, a0, 2 -; CHECK-RV32-FV-NEXT: mv a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FV-NEXT: add a0, a0, a1 -; CHECK-RV32-FV-NEXT: add a0, sp, a0 -; CHECK-RV32-FV-NEXT: addi a0, a0, 16 -; CHECK-RV32-FV-NEXT: vs1r.v v19, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-FV-NEXT: csrr a0, vlenb -; CHECK-RV32-FV-NEXT: mv a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FV-NEXT: add a1, a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 2 -; CHECK-RV32-FV-NEXT: add a0, a0, a1 -; CHECK-RV32-FV-NEXT: add a0, sp, a0 -; CHECK-RV32-FV-NEXT: addi a0, a0, 16 -; CHECK-RV32-FV-NEXT: vs1r.v v20, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-FV-NEXT: csrr a0, vlenb -; CHECK-RV32-FV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FV-NEXT: mv a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 2 -; CHECK-RV32-FV-NEXT: add a0, a0, a1 -; CHECK-RV32-FV-NEXT: add a0, sp, a0 -; CHECK-RV32-FV-NEXT: addi a0, a0, 16 -; CHECK-RV32-FV-NEXT: vs1r.v v21, (a0) # vscale x 8-byte Folded 
Spill -; CHECK-RV32-FV-NEXT: csrr a0, vlenb -; CHECK-RV32-FV-NEXT: slli a1, a0, 3 -; CHECK-RV32-FV-NEXT: add a0, a1, a0 -; CHECK-RV32-FV-NEXT: add a0, sp, a0 -; CHECK-RV32-FV-NEXT: addi a0, a0, 16 -; CHECK-RV32-FV-NEXT: vs1r.v v22, (a0) # vscale x 8-byte Folded Spill +; CHECK-RV32-FV-NEXT: vs8r.v v8, (a0) # vscale x 64-byte Folded Spill ; CHECK-RV32-FV-NEXT: csrr a0, vlenb ; CHECK-RV32-FV-NEXT: slli a0, a0, 3 ; CHECK-RV32-FV-NEXT: add a0, sp, a0 ; CHECK-RV32-FV-NEXT: addi a0, a0, 16 -; CHECK-RV32-FV-NEXT: vs1r.v v23, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-FV-NEXT: csrr a0, vlenb -; CHECK-RV32-FV-NEXT: slli a1, a0, 3 -; CHECK-RV32-FV-NEXT: sub a0, a1, a0 -; CHECK-RV32-FV-NEXT: add a0, sp, a0 -; CHECK-RV32-FV-NEXT: addi a0, a0, 16 -; CHECK-RV32-FV-NEXT: vs1r.v v24, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-FV-NEXT: csrr a0, vlenb -; CHECK-RV32-FV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FV-NEXT: mv a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FV-NEXT: add a0, a0, a1 -; CHECK-RV32-FV-NEXT: add a0, sp, a0 -; CHECK-RV32-FV-NEXT: addi a0, a0, 16 -; CHECK-RV32-FV-NEXT: vs1r.v v25, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-FV-NEXT: csrr a0, vlenb -; CHECK-RV32-FV-NEXT: slli a1, a0, 2 -; CHECK-RV32-FV-NEXT: add a0, a1, a0 -; CHECK-RV32-FV-NEXT: add a0, sp, a0 -; CHECK-RV32-FV-NEXT: addi a0, a0, 16 -; CHECK-RV32-FV-NEXT: vs1r.v v26, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-FV-NEXT: csrr a0, vlenb -; CHECK-RV32-FV-NEXT: slli a0, a0, 2 -; CHECK-RV32-FV-NEXT: add a0, sp, a0 -; CHECK-RV32-FV-NEXT: addi a0, a0, 16 -; CHECK-RV32-FV-NEXT: vs1r.v v27, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-FV-NEXT: csrr a0, vlenb -; CHECK-RV32-FV-NEXT: slli a1, a0, 1 -; CHECK-RV32-FV-NEXT: add a0, a1, a0 -; CHECK-RV32-FV-NEXT: add a0, sp, a0 -; CHECK-RV32-FV-NEXT: addi a0, a0, 16 -; CHECK-RV32-FV-NEXT: vs1r.v v28, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-FV-NEXT: csrr a0, vlenb -; CHECK-RV32-FV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FV-NEXT: add a0, sp, a0 -; CHECK-RV32-FV-NEXT: addi a0, a0, 16 -; CHECK-RV32-FV-NEXT: vs1r.v v29, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-FV-NEXT: csrr a0, vlenb -; CHECK-RV32-FV-NEXT: add a0, sp, a0 -; CHECK-RV32-FV-NEXT: addi a0, a0, 16 -; CHECK-RV32-FV-NEXT: vs1r.v v30, (a0) # vscale x 8-byte Folded Spill +; CHECK-RV32-FV-NEXT: vs8r.v v16, (a0) # vscale x 64-byte Folded Spill ; CHECK-RV32-FV-NEXT: addi a0, sp, 16 -; CHECK-RV32-FV-NEXT: vs1r.v v31, (a0) # vscale x 8-byte Folded Spill +; CHECK-RV32-FV-NEXT: vs8r.v v24, (a0) # vscale x 64-byte Folded Spill ; CHECK-RV32-FV-NEXT: call otherfoo ; CHECK-RV32-FV-NEXT: csrr a0, vlenb -; CHECK-RV32-FV-NEXT: slli a1, a0, 5 -; CHECK-RV32-FV-NEXT: sub a0, a1, a0 -; CHECK-RV32-FV-NEXT: add a0, sp, a0 -; CHECK-RV32-FV-NEXT: addi a0, a0, 16 -; CHECK-RV32-FV-NEXT: vl1r.v v0, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-FV-NEXT: csrr a0, vlenb -; CHECK-RV32-FV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FV-NEXT: mv a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FV-NEXT: add a1, a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FV-NEXT: add a1, a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FV-NEXT: add a0, a0, a1 -; CHECK-RV32-FV-NEXT: add a0, sp, a0 -; CHECK-RV32-FV-NEXT: addi a0, a0, 16 -; CHECK-RV32-FV-NEXT: vl1r.v v1, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-FV-NEXT: csrr a0, vlenb -; CHECK-RV32-FV-NEXT: mv a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 2 -; CHECK-RV32-FV-NEXT: add a1, a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FV-NEXT: 
add a1, a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FV-NEXT: add a0, a0, a1 -; CHECK-RV32-FV-NEXT: add a0, sp, a0 -; CHECK-RV32-FV-NEXT: addi a0, a0, 16 -; CHECK-RV32-FV-NEXT: vl1r.v v2, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-FV-NEXT: csrr a0, vlenb -; CHECK-RV32-FV-NEXT: slli a0, a0, 2 -; CHECK-RV32-FV-NEXT: mv a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FV-NEXT: add a1, a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FV-NEXT: add a0, a0, a1 -; CHECK-RV32-FV-NEXT: add a0, sp, a0 -; CHECK-RV32-FV-NEXT: addi a0, a0, 16 -; CHECK-RV32-FV-NEXT: vl1r.v v3, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-FV-NEXT: csrr a0, vlenb -; CHECK-RV32-FV-NEXT: mv a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FV-NEXT: add a1, a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 2 -; CHECK-RV32-FV-NEXT: add a1, a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FV-NEXT: add a0, a0, a1 -; CHECK-RV32-FV-NEXT: add a0, sp, a0 -; CHECK-RV32-FV-NEXT: addi a0, a0, 16 -; CHECK-RV32-FV-NEXT: vl1r.v v4, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-FV-NEXT: csrr a0, vlenb -; CHECK-RV32-FV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FV-NEXT: mv a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 2 -; CHECK-RV32-FV-NEXT: add a1, a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FV-NEXT: add a0, a0, a1 -; CHECK-RV32-FV-NEXT: add a0, sp, a0 -; CHECK-RV32-FV-NEXT: addi a0, a0, 16 -; CHECK-RV32-FV-NEXT: vl1r.v v5, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-FV-NEXT: csrr a0, vlenb -; CHECK-RV32-FV-NEXT: mv a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 3 -; CHECK-RV32-FV-NEXT: add a1, a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FV-NEXT: add a0, a0, a1 -; CHECK-RV32-FV-NEXT: add a0, sp, a0 -; CHECK-RV32-FV-NEXT: addi a0, a0, 16 -; CHECK-RV32-FV-NEXT: vl1r.v v6, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-FV-NEXT: csrr a0, vlenb ; CHECK-RV32-FV-NEXT: slli a0, a0, 3 ; CHECK-RV32-FV-NEXT: mv a1, a0 ; CHECK-RV32-FV-NEXT: slli a0, a0, 1 ; CHECK-RV32-FV-NEXT: add a0, a0, a1 ; CHECK-RV32-FV-NEXT: add a0, sp, a0 ; CHECK-RV32-FV-NEXT: addi a0, a0, 16 -; CHECK-RV32-FV-NEXT: vl1r.v v7, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-FV-NEXT: csrr a0, vlenb -; CHECK-RV32-FV-NEXT: mv a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FV-NEXT: add a1, a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FV-NEXT: add a1, a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 2 -; CHECK-RV32-FV-NEXT: add a0, a0, a1 -; CHECK-RV32-FV-NEXT: add a0, sp, a0 -; CHECK-RV32-FV-NEXT: addi a0, a0, 16 -; CHECK-RV32-FV-NEXT: vl1r.v v8, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-FV-NEXT: csrr a0, vlenb -; CHECK-RV32-FV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FV-NEXT: mv a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FV-NEXT: add a1, a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 2 -; CHECK-RV32-FV-NEXT: add a0, a0, a1 -; CHECK-RV32-FV-NEXT: add a0, sp, a0 -; CHECK-RV32-FV-NEXT: addi a0, a0, 16 -; CHECK-RV32-FV-NEXT: vl1r.v v9, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-FV-NEXT: csrr a0, vlenb -; CHECK-RV32-FV-NEXT: mv a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 2 -; CHECK-RV32-FV-NEXT: add a1, a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 2 -; CHECK-RV32-FV-NEXT: add a0, a0, a1 -; CHECK-RV32-FV-NEXT: add a0, sp, a0 -; CHECK-RV32-FV-NEXT: addi a0, a0, 16 -; CHECK-RV32-FV-NEXT: vl1r.v v10, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-FV-NEXT: csrr a0, vlenb -; CHECK-RV32-FV-NEXT: slli a0, a0, 2 -; CHECK-RV32-FV-NEXT: mv a1, a0 -; 
CHECK-RV32-FV-NEXT: slli a0, a0, 2 -; CHECK-RV32-FV-NEXT: add a0, a0, a1 -; CHECK-RV32-FV-NEXT: add a0, sp, a0 -; CHECK-RV32-FV-NEXT: addi a0, a0, 16 -; CHECK-RV32-FV-NEXT: vl1r.v v11, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-FV-NEXT: csrr a0, vlenb -; CHECK-RV32-FV-NEXT: mv a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FV-NEXT: add a1, a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 3 -; CHECK-RV32-FV-NEXT: add a0, a0, a1 -; CHECK-RV32-FV-NEXT: add a0, sp, a0 -; CHECK-RV32-FV-NEXT: addi a0, a0, 16 -; CHECK-RV32-FV-NEXT: vl1r.v v12, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-FV-NEXT: csrr a0, vlenb -; CHECK-RV32-FV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FV-NEXT: mv a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 3 -; CHECK-RV32-FV-NEXT: add a0, a0, a1 -; CHECK-RV32-FV-NEXT: add a0, sp, a0 -; CHECK-RV32-FV-NEXT: addi a0, a0, 16 -; CHECK-RV32-FV-NEXT: vl1r.v v13, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-FV-NEXT: csrr a0, vlenb -; CHECK-RV32-FV-NEXT: slli a1, a0, 4 -; CHECK-RV32-FV-NEXT: add a0, a1, a0 -; CHECK-RV32-FV-NEXT: add a0, sp, a0 -; CHECK-RV32-FV-NEXT: addi a0, a0, 16 -; CHECK-RV32-FV-NEXT: vl1r.v v14, (a0) # vscale x 8-byte Folded Reload +; CHECK-RV32-FV-NEXT: vl8r.v v0, (a0) # vscale x 64-byte Folded Reload ; CHECK-RV32-FV-NEXT: csrr a0, vlenb ; CHECK-RV32-FV-NEXT: slli a0, a0, 4 ; CHECK-RV32-FV-NEXT: add a0, sp, a0 ; CHECK-RV32-FV-NEXT: addi a0, a0, 16 -; CHECK-RV32-FV-NEXT: vl1r.v v15, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-FV-NEXT: csrr a0, vlenb -; CHECK-RV32-FV-NEXT: slli a1, a0, 4 -; CHECK-RV32-FV-NEXT: sub a0, a1, a0 -; CHECK-RV32-FV-NEXT: add a0, sp, a0 -; CHECK-RV32-FV-NEXT: addi a0, a0, 16 -; CHECK-RV32-FV-NEXT: vl1r.v v16, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-FV-NEXT: csrr a0, vlenb -; CHECK-RV32-FV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FV-NEXT: mv a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FV-NEXT: add a1, a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FV-NEXT: add a0, a0, a1 -; CHECK-RV32-FV-NEXT: add a0, sp, a0 -; CHECK-RV32-FV-NEXT: addi a0, a0, 16 -; CHECK-RV32-FV-NEXT: vl1r.v v17, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-FV-NEXT: csrr a0, vlenb -; CHECK-RV32-FV-NEXT: mv a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 2 -; CHECK-RV32-FV-NEXT: add a1, a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FV-NEXT: add a0, a0, a1 -; CHECK-RV32-FV-NEXT: add a0, sp, a0 -; CHECK-RV32-FV-NEXT: addi a0, a0, 16 -; CHECK-RV32-FV-NEXT: vl1r.v v18, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-FV-NEXT: csrr a0, vlenb -; CHECK-RV32-FV-NEXT: slli a0, a0, 2 -; CHECK-RV32-FV-NEXT: mv a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FV-NEXT: add a0, a0, a1 -; CHECK-RV32-FV-NEXT: add a0, sp, a0 -; CHECK-RV32-FV-NEXT: addi a0, a0, 16 -; CHECK-RV32-FV-NEXT: vl1r.v v19, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-FV-NEXT: csrr a0, vlenb -; CHECK-RV32-FV-NEXT: mv a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FV-NEXT: add a1, a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 2 -; CHECK-RV32-FV-NEXT: add a0, a0, a1 -; CHECK-RV32-FV-NEXT: add a0, sp, a0 -; CHECK-RV32-FV-NEXT: addi a0, a0, 16 -; CHECK-RV32-FV-NEXT: vl1r.v v20, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-FV-NEXT: csrr a0, vlenb -; CHECK-RV32-FV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FV-NEXT: mv a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 2 -; CHECK-RV32-FV-NEXT: add a0, a0, a1 -; CHECK-RV32-FV-NEXT: add a0, sp, a0 -; CHECK-RV32-FV-NEXT: addi a0, a0, 16 -; CHECK-RV32-FV-NEXT: vl1r.v v21, (a0) # vscale x 8-byte 
Folded Reload -; CHECK-RV32-FV-NEXT: csrr a0, vlenb -; CHECK-RV32-FV-NEXT: slli a1, a0, 3 -; CHECK-RV32-FV-NEXT: add a0, a1, a0 -; CHECK-RV32-FV-NEXT: add a0, sp, a0 -; CHECK-RV32-FV-NEXT: addi a0, a0, 16 -; CHECK-RV32-FV-NEXT: vl1r.v v22, (a0) # vscale x 8-byte Folded Reload +; CHECK-RV32-FV-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload ; CHECK-RV32-FV-NEXT: csrr a0, vlenb ; CHECK-RV32-FV-NEXT: slli a0, a0, 3 ; CHECK-RV32-FV-NEXT: add a0, sp, a0 ; CHECK-RV32-FV-NEXT: addi a0, a0, 16 -; CHECK-RV32-FV-NEXT: vl1r.v v23, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-FV-NEXT: csrr a0, vlenb -; CHECK-RV32-FV-NEXT: slli a1, a0, 3 -; CHECK-RV32-FV-NEXT: sub a0, a1, a0 -; CHECK-RV32-FV-NEXT: add a0, sp, a0 -; CHECK-RV32-FV-NEXT: addi a0, a0, 16 -; CHECK-RV32-FV-NEXT: vl1r.v v24, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-FV-NEXT: csrr a0, vlenb -; CHECK-RV32-FV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FV-NEXT: mv a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FV-NEXT: add a0, a0, a1 -; CHECK-RV32-FV-NEXT: add a0, sp, a0 -; CHECK-RV32-FV-NEXT: addi a0, a0, 16 -; CHECK-RV32-FV-NEXT: vl1r.v v25, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-FV-NEXT: csrr a0, vlenb -; CHECK-RV32-FV-NEXT: slli a1, a0, 2 -; CHECK-RV32-FV-NEXT: add a0, a1, a0 -; CHECK-RV32-FV-NEXT: add a0, sp, a0 -; CHECK-RV32-FV-NEXT: addi a0, a0, 16 -; CHECK-RV32-FV-NEXT: vl1r.v v26, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-FV-NEXT: csrr a0, vlenb -; CHECK-RV32-FV-NEXT: slli a0, a0, 2 -; CHECK-RV32-FV-NEXT: add a0, sp, a0 -; CHECK-RV32-FV-NEXT: addi a0, a0, 16 -; CHECK-RV32-FV-NEXT: vl1r.v v27, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-FV-NEXT: csrr a0, vlenb -; CHECK-RV32-FV-NEXT: slli a1, a0, 1 -; CHECK-RV32-FV-NEXT: add a0, a1, a0 -; CHECK-RV32-FV-NEXT: add a0, sp, a0 -; CHECK-RV32-FV-NEXT: addi a0, a0, 16 -; CHECK-RV32-FV-NEXT: vl1r.v v28, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-FV-NEXT: csrr a0, vlenb -; CHECK-RV32-FV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FV-NEXT: add a0, sp, a0 -; CHECK-RV32-FV-NEXT: addi a0, a0, 16 -; CHECK-RV32-FV-NEXT: vl1r.v v29, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-FV-NEXT: csrr a0, vlenb -; CHECK-RV32-FV-NEXT: add a0, sp, a0 -; CHECK-RV32-FV-NEXT: addi a0, a0, 16 -; CHECK-RV32-FV-NEXT: vl1r.v v30, (a0) # vscale x 8-byte Folded Reload +; CHECK-RV32-FV-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload ; CHECK-RV32-FV-NEXT: addi a0, sp, 16 -; CHECK-RV32-FV-NEXT: vl1r.v v31, (a0) # vscale x 8-byte Folded Reload +; CHECK-RV32-FV-NEXT: vl8r.v v24, (a0) # vscale x 64-byte Folded Reload ; CHECK-RV32-FV-NEXT: csrr a0, vlenb ; CHECK-RV32-FV-NEXT: slli a0, a0, 5 ; CHECK-RV32-FV-NEXT: add sp, sp, a0 @@ -1928,498 +1024,46 @@ define void @foo_with_call() #1 { ; CHECK-RV32-FDV-NEXT: slli a0, a0, 5 ; CHECK-RV32-FDV-NEXT: sub sp, sp, a0 ; CHECK-RV32-FDV-NEXT: csrr a0, vlenb -; CHECK-RV32-FDV-NEXT: slli a1, a0, 5 -; CHECK-RV32-FDV-NEXT: sub a0, a1, a0 -; CHECK-RV32-FDV-NEXT: add a0, sp, a0 -; CHECK-RV32-FDV-NEXT: addi a0, a0, 16 -; CHECK-RV32-FDV-NEXT: vs1r.v v0, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-FDV-NEXT: csrr a0, vlenb -; CHECK-RV32-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FDV-NEXT: mv a1, a0 -; CHECK-RV32-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FDV-NEXT: add a1, a1, a0 -; CHECK-RV32-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FDV-NEXT: add a1, a1, a0 -; CHECK-RV32-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FDV-NEXT: add a0, a0, a1 -; CHECK-RV32-FDV-NEXT: add a0, sp, a0 -; CHECK-RV32-FDV-NEXT: addi a0, a0, 16 -; CHECK-RV32-FDV-NEXT: vs1r.v 
v1, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-FDV-NEXT: csrr a0, vlenb -; CHECK-RV32-FDV-NEXT: mv a1, a0 -; CHECK-RV32-FDV-NEXT: slli a0, a0, 2 -; CHECK-RV32-FDV-NEXT: add a1, a1, a0 -; CHECK-RV32-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FDV-NEXT: add a1, a1, a0 -; CHECK-RV32-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FDV-NEXT: add a0, a0, a1 -; CHECK-RV32-FDV-NEXT: add a0, sp, a0 -; CHECK-RV32-FDV-NEXT: addi a0, a0, 16 -; CHECK-RV32-FDV-NEXT: vs1r.v v2, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-FDV-NEXT: csrr a0, vlenb -; CHECK-RV32-FDV-NEXT: slli a0, a0, 2 -; CHECK-RV32-FDV-NEXT: mv a1, a0 -; CHECK-RV32-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FDV-NEXT: add a1, a1, a0 -; CHECK-RV32-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FDV-NEXT: add a0, a0, a1 -; CHECK-RV32-FDV-NEXT: add a0, sp, a0 -; CHECK-RV32-FDV-NEXT: addi a0, a0, 16 -; CHECK-RV32-FDV-NEXT: vs1r.v v3, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-FDV-NEXT: csrr a0, vlenb -; CHECK-RV32-FDV-NEXT: mv a1, a0 -; CHECK-RV32-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FDV-NEXT: add a1, a1, a0 -; CHECK-RV32-FDV-NEXT: slli a0, a0, 2 -; CHECK-RV32-FDV-NEXT: add a1, a1, a0 -; CHECK-RV32-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FDV-NEXT: add a0, a0, a1 -; CHECK-RV32-FDV-NEXT: add a0, sp, a0 -; CHECK-RV32-FDV-NEXT: addi a0, a0, 16 -; CHECK-RV32-FDV-NEXT: vs1r.v v4, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-FDV-NEXT: csrr a0, vlenb -; CHECK-RV32-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FDV-NEXT: mv a1, a0 -; CHECK-RV32-FDV-NEXT: slli a0, a0, 2 -; CHECK-RV32-FDV-NEXT: add a1, a1, a0 -; CHECK-RV32-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FDV-NEXT: add a0, a0, a1 -; CHECK-RV32-FDV-NEXT: add a0, sp, a0 -; CHECK-RV32-FDV-NEXT: addi a0, a0, 16 -; CHECK-RV32-FDV-NEXT: vs1r.v v5, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-FDV-NEXT: csrr a0, vlenb -; CHECK-RV32-FDV-NEXT: mv a1, a0 ; CHECK-RV32-FDV-NEXT: slli a0, a0, 3 -; CHECK-RV32-FDV-NEXT: add a1, a1, a0 -; CHECK-RV32-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FDV-NEXT: add a0, a0, a1 -; CHECK-RV32-FDV-NEXT: add a0, sp, a0 -; CHECK-RV32-FDV-NEXT: addi a0, a0, 16 -; CHECK-RV32-FDV-NEXT: vs1r.v v6, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-FDV-NEXT: csrr a0, vlenb -; CHECK-RV32-FDV-NEXT: slli a0, a0, 3 -; CHECK-RV32-FDV-NEXT: mv a1, a0 -; CHECK-RV32-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FDV-NEXT: add a0, a0, a1 -; CHECK-RV32-FDV-NEXT: add a0, sp, a0 -; CHECK-RV32-FDV-NEXT: addi a0, a0, 16 -; CHECK-RV32-FDV-NEXT: vs1r.v v7, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-FDV-NEXT: csrr a0, vlenb -; CHECK-RV32-FDV-NEXT: mv a1, a0 -; CHECK-RV32-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FDV-NEXT: add a1, a1, a0 -; CHECK-RV32-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FDV-NEXT: add a1, a1, a0 -; CHECK-RV32-FDV-NEXT: slli a0, a0, 2 -; CHECK-RV32-FDV-NEXT: add a0, a0, a1 -; CHECK-RV32-FDV-NEXT: add a0, sp, a0 -; CHECK-RV32-FDV-NEXT: addi a0, a0, 16 -; CHECK-RV32-FDV-NEXT: vs1r.v v8, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-FDV-NEXT: csrr a0, vlenb -; CHECK-RV32-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FDV-NEXT: mv a1, a0 -; CHECK-RV32-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FDV-NEXT: add a1, a1, a0 -; CHECK-RV32-FDV-NEXT: slli a0, a0, 2 -; CHECK-RV32-FDV-NEXT: add a0, a0, a1 -; CHECK-RV32-FDV-NEXT: add a0, sp, a0 -; CHECK-RV32-FDV-NEXT: addi a0, a0, 16 -; CHECK-RV32-FDV-NEXT: vs1r.v v9, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-FDV-NEXT: csrr a0, vlenb -; CHECK-RV32-FDV-NEXT: mv a1, a0 -; CHECK-RV32-FDV-NEXT: slli a0, a0, 2 -; CHECK-RV32-FDV-NEXT: add a1, a1, a0 -; 
CHECK-RV32-FDV-NEXT: slli a0, a0, 2
-; CHECK-RV32-FDV-NEXT: add a0, a0, a1
-; CHECK-RV32-FDV-NEXT: add a0, sp, a0
-; CHECK-RV32-FDV-NEXT: addi a0, a0, 16
-; CHECK-RV32-FDV-NEXT: vs1r.v v10, (a0) # vscale x 8-byte Folded Spill
-; CHECK-RV32-FDV-NEXT: csrr a0, vlenb
-; CHECK-RV32-FDV-NEXT: slli a0, a0, 2
-; CHECK-RV32-FDV-NEXT: mv a1, a0
-; CHECK-RV32-FDV-NEXT: slli a0, a0, 2
-; CHECK-RV32-FDV-NEXT: add a0, a0, a1
-; CHECK-RV32-FDV-NEXT: add a0, sp, a0
-; CHECK-RV32-FDV-NEXT: addi a0, a0, 16
-; CHECK-RV32-FDV-NEXT: vs1r.v v11, (a0) # vscale x 8-byte Folded Spill
-; CHECK-RV32-FDV-NEXT: csrr a0, vlenb
; CHECK-RV32-FDV-NEXT: mv a1, a0
; CHECK-RV32-FDV-NEXT: slli a0, a0, 1
-; CHECK-RV32-FDV-NEXT: add a1, a1, a0
-; CHECK-RV32-FDV-NEXT: slli a0, a0, 3
; CHECK-RV32-FDV-NEXT: add a0, a0, a1
; CHECK-RV32-FDV-NEXT: add a0, sp, a0
; CHECK-RV32-FDV-NEXT: addi a0, a0, 16
-; CHECK-RV32-FDV-NEXT: vs1r.v v12, (a0) # vscale x 8-byte Folded Spill
-; CHECK-RV32-FDV-NEXT: csrr a0, vlenb
-; CHECK-RV32-FDV-NEXT: slli a0, a0, 1
-; CHECK-RV32-FDV-NEXT: mv a1, a0
-; CHECK-RV32-FDV-NEXT: slli a0, a0, 3
-; CHECK-RV32-FDV-NEXT: add a0, a0, a1
-; CHECK-RV32-FDV-NEXT: add a0, sp, a0
-; CHECK-RV32-FDV-NEXT: addi a0, a0, 16
-; CHECK-RV32-FDV-NEXT: vs1r.v v13, (a0) # vscale x 8-byte Folded Spill
-; CHECK-RV32-FDV-NEXT: csrr a0, vlenb
-; CHECK-RV32-FDV-NEXT: slli a1, a0, 4
-; CHECK-RV32-FDV-NEXT: add a0, a1, a0
-; CHECK-RV32-FDV-NEXT: add a0, sp, a0
-; CHECK-RV32-FDV-NEXT: addi a0, a0, 16
-; CHECK-RV32-FDV-NEXT: vs1r.v v14, (a0) # vscale x 8-byte Folded Spill
+; CHECK-RV32-FDV-NEXT: vs8r.v v0, (a0) # vscale x 64-byte Folded Spill
; CHECK-RV32-FDV-NEXT: csrr a0, vlenb
; CHECK-RV32-FDV-NEXT: slli a0, a0, 4
; CHECK-RV32-FDV-NEXT: add a0, sp, a0
; CHECK-RV32-FDV-NEXT: addi a0, a0, 16
-; CHECK-RV32-FDV-NEXT: vs1r.v v15, (a0) # vscale x 8-byte Folded Spill
-; CHECK-RV32-FDV-NEXT: csrr a0, vlenb
-; CHECK-RV32-FDV-NEXT: slli a1, a0, 4
-; CHECK-RV32-FDV-NEXT: sub a0, a1, a0
-; CHECK-RV32-FDV-NEXT: add a0, sp, a0
-; CHECK-RV32-FDV-NEXT: addi a0, a0, 16
-; CHECK-RV32-FDV-NEXT: vs1r.v v16, (a0) # vscale x 8-byte Folded Spill
-; CHECK-RV32-FDV-NEXT: csrr a0, vlenb
-; CHECK-RV32-FDV-NEXT: slli a0, a0, 1
-; CHECK-RV32-FDV-NEXT: mv a1, a0
-; CHECK-RV32-FDV-NEXT: slli a0, a0, 1
-; CHECK-RV32-FDV-NEXT: add a1, a1, a0
-; CHECK-RV32-FDV-NEXT: slli a0, a0, 1
-; CHECK-RV32-FDV-NEXT: add a0, a0, a1
-; CHECK-RV32-FDV-NEXT: add a0, sp, a0
-; CHECK-RV32-FDV-NEXT: addi a0, a0, 16
-; CHECK-RV32-FDV-NEXT: vs1r.v v17, (a0) # vscale x 8-byte Folded Spill
-; CHECK-RV32-FDV-NEXT: csrr a0, vlenb
-; CHECK-RV32-FDV-NEXT: mv a1, a0
-; CHECK-RV32-FDV-NEXT: slli a0, a0, 2
-; CHECK-RV32-FDV-NEXT: add a1, a1, a0
-; CHECK-RV32-FDV-NEXT: slli a0, a0, 1
-; CHECK-RV32-FDV-NEXT: add a0, a0, a1
-; CHECK-RV32-FDV-NEXT: add a0, sp, a0
-; CHECK-RV32-FDV-NEXT: addi a0, a0, 16
-; CHECK-RV32-FDV-NEXT: vs1r.v v18, (a0) # vscale x 8-byte Folded Spill
-; CHECK-RV32-FDV-NEXT: csrr a0, vlenb
-; CHECK-RV32-FDV-NEXT: slli a0, a0, 2
-; CHECK-RV32-FDV-NEXT: mv a1, a0
-; CHECK-RV32-FDV-NEXT: slli a0, a0, 1
-; CHECK-RV32-FDV-NEXT: add a0, a0, a1
-; CHECK-RV32-FDV-NEXT: add a0, sp, a0
-; CHECK-RV32-FDV-NEXT: addi a0, a0, 16
-; CHECK-RV32-FDV-NEXT: vs1r.v v19, (a0) # vscale x 8-byte Folded Spill
-; CHECK-RV32-FDV-NEXT: csrr a0, vlenb
-; CHECK-RV32-FDV-NEXT: mv a1, a0
-; CHECK-RV32-FDV-NEXT: slli a0, a0, 1
-; CHECK-RV32-FDV-NEXT: add a1, a1, a0
-; CHECK-RV32-FDV-NEXT: slli a0, a0, 2
-; CHECK-RV32-FDV-NEXT: add a0, a0, a1
-; CHECK-RV32-FDV-NEXT: add a0, sp, a0
-; CHECK-RV32-FDV-NEXT: addi a0, a0, 16
-; CHECK-RV32-FDV-NEXT: vs1r.v v20, (a0) # vscale x 8-byte Folded Spill
-; CHECK-RV32-FDV-NEXT: csrr a0, vlenb
-; CHECK-RV32-FDV-NEXT: slli a0, a0, 1
-; CHECK-RV32-FDV-NEXT: mv a1, a0
-; CHECK-RV32-FDV-NEXT: slli a0, a0, 2
-; CHECK-RV32-FDV-NEXT: add a0, a0, a1
-; CHECK-RV32-FDV-NEXT: add a0, sp, a0
-; CHECK-RV32-FDV-NEXT: addi a0, a0, 16
-; CHECK-RV32-FDV-NEXT: vs1r.v v21, (a0) # vscale x 8-byte Folded Spill
-; CHECK-RV32-FDV-NEXT: csrr a0, vlenb
-; CHECK-RV32-FDV-NEXT: slli a1, a0, 3
-; CHECK-RV32-FDV-NEXT: add a0, a1, a0
-; CHECK-RV32-FDV-NEXT: add a0, sp, a0
-; CHECK-RV32-FDV-NEXT: addi a0, a0, 16
-; CHECK-RV32-FDV-NEXT: vs1r.v v22, (a0) # vscale x 8-byte Folded Spill
+; CHECK-RV32-FDV-NEXT: vs8r.v v8, (a0) # vscale x 64-byte Folded Spill
; CHECK-RV32-FDV-NEXT: csrr a0, vlenb
; CHECK-RV32-FDV-NEXT: slli a0, a0, 3
; CHECK-RV32-FDV-NEXT: add a0, sp, a0
; CHECK-RV32-FDV-NEXT: addi a0, a0, 16
-; CHECK-RV32-FDV-NEXT: vs1r.v v23, (a0) # vscale x 8-byte Folded Spill
-; CHECK-RV32-FDV-NEXT: csrr a0, vlenb
-; CHECK-RV32-FDV-NEXT: slli a1, a0, 3
-; CHECK-RV32-FDV-NEXT: sub a0, a1, a0
-; CHECK-RV32-FDV-NEXT: add a0, sp, a0
-; CHECK-RV32-FDV-NEXT: addi a0, a0, 16
-; CHECK-RV32-FDV-NEXT: vs1r.v v24, (a0) # vscale x 8-byte Folded Spill
-; CHECK-RV32-FDV-NEXT: csrr a0, vlenb
-; CHECK-RV32-FDV-NEXT: slli a0, a0, 1
-; CHECK-RV32-FDV-NEXT: mv a1, a0
-; CHECK-RV32-FDV-NEXT: slli a0, a0, 1
-; CHECK-RV32-FDV-NEXT: add a0, a0, a1
-; CHECK-RV32-FDV-NEXT: add a0, sp, a0
-; CHECK-RV32-FDV-NEXT: addi a0, a0, 16
-; CHECK-RV32-FDV-NEXT: vs1r.v v25, (a0) # vscale x 8-byte Folded Spill
-; CHECK-RV32-FDV-NEXT: csrr a0, vlenb
-; CHECK-RV32-FDV-NEXT: slli a1, a0, 2
-; CHECK-RV32-FDV-NEXT: add a0, a1, a0
-; CHECK-RV32-FDV-NEXT: add a0, sp, a0
-; CHECK-RV32-FDV-NEXT: addi a0, a0, 16
-; CHECK-RV32-FDV-NEXT: vs1r.v v26, (a0) # vscale x 8-byte Folded Spill
-; CHECK-RV32-FDV-NEXT: csrr a0, vlenb
-; CHECK-RV32-FDV-NEXT: slli a0, a0, 2
-; CHECK-RV32-FDV-NEXT: add a0, sp, a0
-; CHECK-RV32-FDV-NEXT: addi a0, a0, 16
-; CHECK-RV32-FDV-NEXT: vs1r.v v27, (a0) # vscale x 8-byte Folded Spill
-; CHECK-RV32-FDV-NEXT: csrr a0, vlenb
-; CHECK-RV32-FDV-NEXT: slli a1, a0, 1
-; CHECK-RV32-FDV-NEXT: add a0, a1, a0
-; CHECK-RV32-FDV-NEXT: add a0, sp, a0
-; CHECK-RV32-FDV-NEXT: addi a0, a0, 16
-; CHECK-RV32-FDV-NEXT: vs1r.v v28, (a0) # vscale x 8-byte Folded Spill
-; CHECK-RV32-FDV-NEXT: csrr a0, vlenb
-; CHECK-RV32-FDV-NEXT: slli a0, a0, 1
-; CHECK-RV32-FDV-NEXT: add a0, sp, a0
-; CHECK-RV32-FDV-NEXT: addi a0, a0, 16
-; CHECK-RV32-FDV-NEXT: vs1r.v v29, (a0) # vscale x 8-byte Folded Spill
-; CHECK-RV32-FDV-NEXT: csrr a0, vlenb
-; CHECK-RV32-FDV-NEXT: add a0, sp, a0
-; CHECK-RV32-FDV-NEXT: addi a0, a0, 16
-; CHECK-RV32-FDV-NEXT: vs1r.v v30, (a0) # vscale x 8-byte Folded Spill
+; CHECK-RV32-FDV-NEXT: vs8r.v v16, (a0) # vscale x 64-byte Folded Spill
; CHECK-RV32-FDV-NEXT: addi a0, sp, 16
-; CHECK-RV32-FDV-NEXT: vs1r.v v31, (a0) # vscale x 8-byte Folded Spill
+; CHECK-RV32-FDV-NEXT: vs8r.v v24, (a0) # vscale x 64-byte Folded Spill
; CHECK-RV32-FDV-NEXT: call otherfoo
; CHECK-RV32-FDV-NEXT: csrr a0, vlenb
-; CHECK-RV32-FDV-NEXT: slli a1, a0, 5
-; CHECK-RV32-FDV-NEXT: sub a0, a1, a0
-; CHECK-RV32-FDV-NEXT: add a0, sp, a0
-; CHECK-RV32-FDV-NEXT: addi a0, a0, 16
-; CHECK-RV32-FDV-NEXT: vl1r.v v0, (a0) # vscale x 8-byte Folded Reload
-; CHECK-RV32-FDV-NEXT: csrr a0, vlenb
-; CHECK-RV32-FDV-NEXT: slli a0, a0, 1
-; CHECK-RV32-FDV-NEXT: mv a1, a0
-; CHECK-RV32-FDV-NEXT: slli a0, a0, 1
-; CHECK-RV32-FDV-NEXT: add a1, a1, a0
-; CHECK-RV32-FDV-NEXT: slli a0, a0, 1
-; CHECK-RV32-FDV-NEXT: add a1, a1, a0
-; CHECK-RV32-FDV-NEXT: slli a0, a0, 1
-; CHECK-RV32-FDV-NEXT: add a0, a0, a1
-; CHECK-RV32-FDV-NEXT: add a0, sp, a0
-; CHECK-RV32-FDV-NEXT: addi a0, a0, 16
-; CHECK-RV32-FDV-NEXT: vl1r.v v1, (a0) # vscale x 8-byte Folded Reload
-; CHECK-RV32-FDV-NEXT: csrr a0, vlenb
-; CHECK-RV32-FDV-NEXT: mv a1, a0
-; CHECK-RV32-FDV-NEXT: slli a0, a0, 2
-; CHECK-RV32-FDV-NEXT: add a1, a1, a0
-; CHECK-RV32-FDV-NEXT: slli a0, a0, 1
-; CHECK-RV32-FDV-NEXT: add a1, a1, a0
-; CHECK-RV32-FDV-NEXT: slli a0, a0, 1
-; CHECK-RV32-FDV-NEXT: add a0, a0, a1
-; CHECK-RV32-FDV-NEXT: add a0, sp, a0
-; CHECK-RV32-FDV-NEXT: addi a0, a0, 16
-; CHECK-RV32-FDV-NEXT: vl1r.v v2, (a0) # vscale x 8-byte Folded Reload
-; CHECK-RV32-FDV-NEXT: csrr a0, vlenb
-; CHECK-RV32-FDV-NEXT: slli a0, a0, 2
-; CHECK-RV32-FDV-NEXT: mv a1, a0
-; CHECK-RV32-FDV-NEXT: slli a0, a0, 1
-; CHECK-RV32-FDV-NEXT: add a1, a1, a0
-; CHECK-RV32-FDV-NEXT: slli a0, a0, 1
-; CHECK-RV32-FDV-NEXT: add a0, a0, a1
-; CHECK-RV32-FDV-NEXT: add a0, sp, a0
-; CHECK-RV32-FDV-NEXT: addi a0, a0, 16
-; CHECK-RV32-FDV-NEXT: vl1r.v v3, (a0) # vscale x 8-byte Folded Reload
-; CHECK-RV32-FDV-NEXT: csrr a0, vlenb
-; CHECK-RV32-FDV-NEXT: mv a1, a0
-; CHECK-RV32-FDV-NEXT: slli a0, a0, 1
-; CHECK-RV32-FDV-NEXT: add a1, a1, a0
-; CHECK-RV32-FDV-NEXT: slli a0, a0, 2
-; CHECK-RV32-FDV-NEXT: add a1, a1, a0
-; CHECK-RV32-FDV-NEXT: slli a0, a0, 1
-; CHECK-RV32-FDV-NEXT: add a0, a0, a1
-; CHECK-RV32-FDV-NEXT: add a0, sp, a0
-; CHECK-RV32-FDV-NEXT: addi a0, a0, 16
-; CHECK-RV32-FDV-NEXT: vl1r.v v4, (a0) # vscale x 8-byte Folded Reload
-; CHECK-RV32-FDV-NEXT: csrr a0, vlenb
-; CHECK-RV32-FDV-NEXT: slli a0, a0, 1
-; CHECK-RV32-FDV-NEXT: mv a1, a0
-; CHECK-RV32-FDV-NEXT: slli a0, a0, 2
-; CHECK-RV32-FDV-NEXT: add a1, a1, a0
-; CHECK-RV32-FDV-NEXT: slli a0, a0, 1
-; CHECK-RV32-FDV-NEXT: add a0, a0, a1
-; CHECK-RV32-FDV-NEXT: add a0, sp, a0
-; CHECK-RV32-FDV-NEXT: addi a0, a0, 16
-; CHECK-RV32-FDV-NEXT: vl1r.v v5, (a0) # vscale x 8-byte Folded Reload
-; CHECK-RV32-FDV-NEXT: csrr a0, vlenb
-; CHECK-RV32-FDV-NEXT: mv a1, a0
-; CHECK-RV32-FDV-NEXT: slli a0, a0, 3
-; CHECK-RV32-FDV-NEXT: add a1, a1, a0
-; CHECK-RV32-FDV-NEXT: slli a0, a0, 1
-; CHECK-RV32-FDV-NEXT: add a0, a0, a1
-; CHECK-RV32-FDV-NEXT: add a0, sp, a0
-; CHECK-RV32-FDV-NEXT: addi a0, a0, 16
-; CHECK-RV32-FDV-NEXT: vl1r.v v6, (a0) # vscale x 8-byte Folded Reload
-; CHECK-RV32-FDV-NEXT: csrr a0, vlenb
; CHECK-RV32-FDV-NEXT: slli a0, a0, 3
; CHECK-RV32-FDV-NEXT: mv a1, a0
; CHECK-RV32-FDV-NEXT: slli a0, a0, 1
; CHECK-RV32-FDV-NEXT: add a0, a0, a1
; CHECK-RV32-FDV-NEXT: add a0, sp, a0
; CHECK-RV32-FDV-NEXT: addi a0, a0, 16
-; CHECK-RV32-FDV-NEXT: vl1r.v v7, (a0) # vscale x 8-byte Folded Reload
-; CHECK-RV32-FDV-NEXT: csrr a0, vlenb
-; CHECK-RV32-FDV-NEXT: mv a1, a0
-; CHECK-RV32-FDV-NEXT: slli a0, a0, 1
-; CHECK-RV32-FDV-NEXT: add a1, a1, a0
-; CHECK-RV32-FDV-NEXT: slli a0, a0, 1
-; CHECK-RV32-FDV-NEXT: add a1, a1, a0
-; CHECK-RV32-FDV-NEXT: slli a0, a0, 2
-; CHECK-RV32-FDV-NEXT: add a0, a0, a1
-; CHECK-RV32-FDV-NEXT: add a0, sp, a0
-; CHECK-RV32-FDV-NEXT: addi a0, a0, 16
-; CHECK-RV32-FDV-NEXT: vl1r.v v8, (a0) # vscale x 8-byte Folded Reload
-; CHECK-RV32-FDV-NEXT: csrr a0, vlenb
-; CHECK-RV32-FDV-NEXT: slli a0, a0, 1
-; CHECK-RV32-FDV-NEXT: mv a1, a0
-; CHECK-RV32-FDV-NEXT: slli a0, a0, 1
-; CHECK-RV32-FDV-NEXT: add a1, a1, a0
-; CHECK-RV32-FDV-NEXT: slli a0, a0, 2
-; CHECK-RV32-FDV-NEXT: add a0, a0, a1
-; CHECK-RV32-FDV-NEXT: add a0, sp, a0
-; CHECK-RV32-FDV-NEXT: addi a0, a0, 16
-; CHECK-RV32-FDV-NEXT: vl1r.v v9, (a0) # vscale x 8-byte Folded Reload
-; CHECK-RV32-FDV-NEXT: csrr a0, vlenb
-; CHECK-RV32-FDV-NEXT: mv a1, a0
-; CHECK-RV32-FDV-NEXT: slli a0, a0, 2
-; CHECK-RV32-FDV-NEXT: add a1, a1, a0
-; CHECK-RV32-FDV-NEXT: slli a0, a0, 2
-; CHECK-RV32-FDV-NEXT: add a0, a0, a1
-; CHECK-RV32-FDV-NEXT: add a0, sp, a0
-; CHECK-RV32-FDV-NEXT: addi a0, a0, 16
-; CHECK-RV32-FDV-NEXT: vl1r.v v10, (a0) # vscale x 8-byte Folded Reload
-; CHECK-RV32-FDV-NEXT: csrr a0, vlenb
-; CHECK-RV32-FDV-NEXT: slli a0, a0, 2
-; CHECK-RV32-FDV-NEXT: mv a1, a0
-; CHECK-RV32-FDV-NEXT: slli a0, a0, 2
-; CHECK-RV32-FDV-NEXT: add a0, a0, a1
-; CHECK-RV32-FDV-NEXT: add a0, sp, a0
-; CHECK-RV32-FDV-NEXT: addi a0, a0, 16
-; CHECK-RV32-FDV-NEXT: vl1r.v v11, (a0) # vscale x 8-byte Folded Reload
-; CHECK-RV32-FDV-NEXT: csrr a0, vlenb
-; CHECK-RV32-FDV-NEXT: mv a1, a0
-; CHECK-RV32-FDV-NEXT: slli a0, a0, 1
-; CHECK-RV32-FDV-NEXT: add a1, a1, a0
-; CHECK-RV32-FDV-NEXT: slli a0, a0, 3
-; CHECK-RV32-FDV-NEXT: add a0, a0, a1
-; CHECK-RV32-FDV-NEXT: add a0, sp, a0
-; CHECK-RV32-FDV-NEXT: addi a0, a0, 16
-; CHECK-RV32-FDV-NEXT: vl1r.v v12, (a0) # vscale x 8-byte Folded Reload
-; CHECK-RV32-FDV-NEXT: csrr a0, vlenb
-; CHECK-RV32-FDV-NEXT: slli a0, a0, 1
-; CHECK-RV32-FDV-NEXT: mv a1, a0
-; CHECK-RV32-FDV-NEXT: slli a0, a0, 3
-; CHECK-RV32-FDV-NEXT: add a0, a0, a1
-; CHECK-RV32-FDV-NEXT: add a0, sp, a0
-; CHECK-RV32-FDV-NEXT: addi a0, a0, 16
-; CHECK-RV32-FDV-NEXT: vl1r.v v13, (a0) # vscale x 8-byte Folded Reload
-; CHECK-RV32-FDV-NEXT: csrr a0, vlenb
-; CHECK-RV32-FDV-NEXT: slli a1, a0, 4
-; CHECK-RV32-FDV-NEXT: add a0, a1, a0
-; CHECK-RV32-FDV-NEXT: add a0, sp, a0
-; CHECK-RV32-FDV-NEXT: addi a0, a0, 16
-; CHECK-RV32-FDV-NEXT: vl1r.v v14, (a0) # vscale x 8-byte Folded Reload
+; CHECK-RV32-FDV-NEXT: vl8r.v v0, (a0) # vscale x 64-byte Folded Reload
; CHECK-RV32-FDV-NEXT: csrr a0, vlenb
; CHECK-RV32-FDV-NEXT: slli a0, a0, 4
; CHECK-RV32-FDV-NEXT: add a0, sp, a0
; CHECK-RV32-FDV-NEXT: addi a0, a0, 16
-; CHECK-RV32-FDV-NEXT: vl1r.v v15, (a0) # vscale x 8-byte Folded Reload
-; CHECK-RV32-FDV-NEXT: csrr a0, vlenb
-; CHECK-RV32-FDV-NEXT: slli a1, a0, 4
-; CHECK-RV32-FDV-NEXT: sub a0, a1, a0
-; CHECK-RV32-FDV-NEXT: add a0, sp, a0
-; CHECK-RV32-FDV-NEXT: addi a0, a0, 16
-; CHECK-RV32-FDV-NEXT: vl1r.v v16, (a0) # vscale x 8-byte Folded Reload
-; CHECK-RV32-FDV-NEXT: csrr a0, vlenb
-; CHECK-RV32-FDV-NEXT: slli a0, a0, 1
-; CHECK-RV32-FDV-NEXT: mv a1, a0
-; CHECK-RV32-FDV-NEXT: slli a0, a0, 1
-; CHECK-RV32-FDV-NEXT: add a1, a1, a0
-; CHECK-RV32-FDV-NEXT: slli a0, a0, 1
-; CHECK-RV32-FDV-NEXT: add a0, a0, a1
-; CHECK-RV32-FDV-NEXT: add a0, sp, a0
-; CHECK-RV32-FDV-NEXT: addi a0, a0, 16
-; CHECK-RV32-FDV-NEXT: vl1r.v v17, (a0) # vscale x 8-byte Folded Reload
-; CHECK-RV32-FDV-NEXT: csrr a0, vlenb
-; CHECK-RV32-FDV-NEXT: mv a1, a0
-; CHECK-RV32-FDV-NEXT: slli a0, a0, 2
-; CHECK-RV32-FDV-NEXT: add a1, a1, a0
-; CHECK-RV32-FDV-NEXT: slli a0, a0, 1
-; CHECK-RV32-FDV-NEXT: add a0, a0, a1
-; CHECK-RV32-FDV-NEXT: add a0, sp, a0
-; CHECK-RV32-FDV-NEXT: addi a0, a0, 16
-; CHECK-RV32-FDV-NEXT: vl1r.v v18, (a0) # vscale x 8-byte Folded Reload
-; CHECK-RV32-FDV-NEXT: csrr a0, vlenb
-; CHECK-RV32-FDV-NEXT: slli a0, a0, 2
-; CHECK-RV32-FDV-NEXT: mv a1, a0
-; CHECK-RV32-FDV-NEXT: slli a0, a0, 1
-; CHECK-RV32-FDV-NEXT: add a0, a0, a1
-; CHECK-RV32-FDV-NEXT: add a0, sp, a0
-; CHECK-RV32-FDV-NEXT: addi a0, a0, 16
-; CHECK-RV32-FDV-NEXT: vl1r.v v19, (a0) # vscale x 8-byte Folded Reload
-; CHECK-RV32-FDV-NEXT: csrr a0, vlenb
-; CHECK-RV32-FDV-NEXT: mv a1, a0
-; CHECK-RV32-FDV-NEXT: slli a0, a0, 1
-; CHECK-RV32-FDV-NEXT: add a1, a1, a0
-; CHECK-RV32-FDV-NEXT: slli a0, a0, 2
-; CHECK-RV32-FDV-NEXT: add a0, a0, a1
-; CHECK-RV32-FDV-NEXT: add a0, sp, a0
-; CHECK-RV32-FDV-NEXT: addi a0, a0, 16
-; CHECK-RV32-FDV-NEXT: vl1r.v v20, (a0) # vscale x 8-byte Folded Reload
-; CHECK-RV32-FDV-NEXT: csrr a0, vlenb
-; CHECK-RV32-FDV-NEXT: slli a0, a0, 1
-; CHECK-RV32-FDV-NEXT: mv a1, a0
-; CHECK-RV32-FDV-NEXT: slli a0, a0, 2
-; CHECK-RV32-FDV-NEXT: add a0, a0, a1
-; CHECK-RV32-FDV-NEXT: add a0, sp, a0
-; CHECK-RV32-FDV-NEXT: addi a0, a0, 16
-; CHECK-RV32-FDV-NEXT: vl1r.v v21, (a0) # vscale x 8-byte Folded Reload
-; CHECK-RV32-FDV-NEXT: csrr a0, vlenb
-; CHECK-RV32-FDV-NEXT: slli a1, a0, 3
-; CHECK-RV32-FDV-NEXT: add a0, a1, a0
-; CHECK-RV32-FDV-NEXT: add a0, sp, a0
-; CHECK-RV32-FDV-NEXT: addi a0, a0, 16
-; CHECK-RV32-FDV-NEXT: vl1r.v v22, (a0) # vscale x 8-byte Folded Reload
+; CHECK-RV32-FDV-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
; CHECK-RV32-FDV-NEXT: csrr a0, vlenb
; CHECK-RV32-FDV-NEXT: slli a0, a0, 3
; CHECK-RV32-FDV-NEXT: add a0, sp, a0
; CHECK-RV32-FDV-NEXT: addi a0, a0, 16
-; CHECK-RV32-FDV-NEXT: vl1r.v v23, (a0) # vscale x 8-byte Folded Reload
-; CHECK-RV32-FDV-NEXT: csrr a0, vlenb
-; CHECK-RV32-FDV-NEXT: slli a1, a0, 3
-; CHECK-RV32-FDV-NEXT: sub a0, a1, a0
-; CHECK-RV32-FDV-NEXT: add a0, sp, a0
-; CHECK-RV32-FDV-NEXT: addi a0, a0, 16
-; CHECK-RV32-FDV-NEXT: vl1r.v v24, (a0) # vscale x 8-byte Folded Reload
-; CHECK-RV32-FDV-NEXT: csrr a0, vlenb
-; CHECK-RV32-FDV-NEXT: slli a0, a0, 1
-; CHECK-RV32-FDV-NEXT: mv a1, a0
-; CHECK-RV32-FDV-NEXT: slli a0, a0, 1
-; CHECK-RV32-FDV-NEXT: add a0, a0, a1
-; CHECK-RV32-FDV-NEXT: add a0, sp, a0
-; CHECK-RV32-FDV-NEXT: addi a0, a0, 16
-; CHECK-RV32-FDV-NEXT: vl1r.v v25, (a0) # vscale x 8-byte Folded Reload
-; CHECK-RV32-FDV-NEXT: csrr a0, vlenb
-; CHECK-RV32-FDV-NEXT: slli a1, a0, 2
-; CHECK-RV32-FDV-NEXT: add a0, a1, a0
-; CHECK-RV32-FDV-NEXT: add a0, sp, a0
-; CHECK-RV32-FDV-NEXT: addi a0, a0, 16
-; CHECK-RV32-FDV-NEXT: vl1r.v v26, (a0) # vscale x 8-byte Folded Reload
-; CHECK-RV32-FDV-NEXT: csrr a0, vlenb
-; CHECK-RV32-FDV-NEXT: slli a0, a0, 2
-; CHECK-RV32-FDV-NEXT: add a0, sp, a0
-; CHECK-RV32-FDV-NEXT: addi a0, a0, 16
-; CHECK-RV32-FDV-NEXT: vl1r.v v27, (a0) # vscale x 8-byte Folded Reload
-; CHECK-RV32-FDV-NEXT: csrr a0, vlenb
-; CHECK-RV32-FDV-NEXT: slli a1, a0, 1
-; CHECK-RV32-FDV-NEXT: add a0, a1, a0
-; CHECK-RV32-FDV-NEXT: add a0, sp, a0
-; CHECK-RV32-FDV-NEXT: addi a0, a0, 16
-; CHECK-RV32-FDV-NEXT: vl1r.v v28, (a0) # vscale x 8-byte Folded Reload
-; CHECK-RV32-FDV-NEXT: csrr a0, vlenb
-; CHECK-RV32-FDV-NEXT: slli a0, a0, 1
-; CHECK-RV32-FDV-NEXT: add a0, sp, a0
-; CHECK-RV32-FDV-NEXT: addi a0, a0, 16
-; CHECK-RV32-FDV-NEXT: vl1r.v v29, (a0) # vscale x 8-byte Folded Reload
-; CHECK-RV32-FDV-NEXT: csrr a0, vlenb
-; CHECK-RV32-FDV-NEXT: add a0, sp, a0
-; CHECK-RV32-FDV-NEXT: addi a0, a0, 16
-; CHECK-RV32-FDV-NEXT: vl1r.v v30, (a0) # vscale x 8-byte Folded Reload
+; CHECK-RV32-FDV-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
; CHECK-RV32-FDV-NEXT: addi a0, sp, 16
-; CHECK-RV32-FDV-NEXT: vl1r.v v31, (a0) # vscale x 8-byte Folded Reload
+; CHECK-RV32-FDV-NEXT: vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
; CHECK-RV32-FDV-NEXT: csrr a0, vlenb
; CHECK-RV32-FDV-NEXT: slli a0, a0, 5
; CHECK-RV32-FDV-NEXT: add sp, sp, a0
@@ -3259,498 +1903,46 @@ define void @foo_with_call() #1 {
; CHECK-RV64-V-NEXT: slli a0, a0, 5
; CHECK-RV64-V-NEXT: sub sp, sp, a0
; CHECK-RV64-V-NEXT: csrr a0, vlenb
-; CHECK-RV64-V-NEXT: slli a1, a0, 5
-; CHECK-RV64-V-NEXT: sub a0, a1, a0
-; CHECK-RV64-V-NEXT: add a0, sp, a0
-; CHECK-RV64-V-NEXT: addi a0, a0, 16
-; CHECK-RV64-V-NEXT: vs1r.v v0, (a0) # vscale x 8-byte Folded Spill
-; CHECK-RV64-V-NEXT: csrr a0, vlenb
-; CHECK-RV64-V-NEXT: slli a0, a0, 1
-; CHECK-RV64-V-NEXT: mv a1, a0
-; CHECK-RV64-V-NEXT: slli a0, a0, 1
-; CHECK-RV64-V-NEXT: add a1, a1, a0
-; CHECK-RV64-V-NEXT: slli a0, a0, 1
-; CHECK-RV64-V-NEXT: add a1, a1, a0
-; CHECK-RV64-V-NEXT: slli a0, a0, 1
-; CHECK-RV64-V-NEXT: add a0, a0, a1
-; CHECK-RV64-V-NEXT: add a0, sp, a0
-; CHECK-RV64-V-NEXT: addi a0, a0, 16
-; CHECK-RV64-V-NEXT: vs1r.v v1, (a0) # vscale x 8-byte Folded Spill
-; CHECK-RV64-V-NEXT: csrr a0, vlenb
-; CHECK-RV64-V-NEXT: mv a1, a0
-; CHECK-RV64-V-NEXT: slli a0, a0, 2
-; CHECK-RV64-V-NEXT: add a1, a1, a0
-; CHECK-RV64-V-NEXT: slli a0, a0, 1
-; CHECK-RV64-V-NEXT: add a1, a1, a0
-; CHECK-RV64-V-NEXT: slli a0, a0, 1
-; CHECK-RV64-V-NEXT: add a0, a0, a1
-; CHECK-RV64-V-NEXT: add a0, sp, a0
-; CHECK-RV64-V-NEXT: addi a0, a0, 16
-; CHECK-RV64-V-NEXT: vs1r.v v2, (a0) # vscale x 8-byte Folded Spill
-; CHECK-RV64-V-NEXT: csrr a0, vlenb
-; CHECK-RV64-V-NEXT: slli a0, a0, 2
-; CHECK-RV64-V-NEXT: mv a1, a0
-; CHECK-RV64-V-NEXT: slli a0, a0, 1
-; CHECK-RV64-V-NEXT: add a1, a1, a0
-; CHECK-RV64-V-NEXT: slli a0, a0, 1
-; CHECK-RV64-V-NEXT: add a0, a0, a1
-; CHECK-RV64-V-NEXT: add a0, sp, a0
-; CHECK-RV64-V-NEXT: addi a0, a0, 16
-; CHECK-RV64-V-NEXT: vs1r.v v3, (a0) # vscale x 8-byte Folded Spill
-; CHECK-RV64-V-NEXT: csrr a0, vlenb
-; CHECK-RV64-V-NEXT: mv a1, a0
-; CHECK-RV64-V-NEXT: slli a0, a0, 1
-; CHECK-RV64-V-NEXT: add a1, a1, a0
-; CHECK-RV64-V-NEXT: slli a0, a0, 2
-; CHECK-RV64-V-NEXT: add a1, a1, a0
-; CHECK-RV64-V-NEXT: slli a0, a0, 1
-; CHECK-RV64-V-NEXT: add a0, a0, a1
-; CHECK-RV64-V-NEXT: add a0, sp, a0
-; CHECK-RV64-V-NEXT: addi a0, a0, 16
-; CHECK-RV64-V-NEXT: vs1r.v v4, (a0) # vscale x 8-byte Folded Spill
-; CHECK-RV64-V-NEXT: csrr a0, vlenb
-; CHECK-RV64-V-NEXT: slli a0, a0, 1
-; CHECK-RV64-V-NEXT: mv a1, a0
-; CHECK-RV64-V-NEXT: slli a0, a0, 2
-; CHECK-RV64-V-NEXT: add a1, a1, a0
-; CHECK-RV64-V-NEXT: slli a0, a0, 1
-; CHECK-RV64-V-NEXT: add a0, a0, a1
-; CHECK-RV64-V-NEXT: add a0, sp, a0
-; CHECK-RV64-V-NEXT: addi a0, a0, 16
-; CHECK-RV64-V-NEXT: vs1r.v v5, (a0) # vscale x 8-byte Folded Spill
-; CHECK-RV64-V-NEXT: csrr a0, vlenb
-; CHECK-RV64-V-NEXT: mv a1, a0
-; CHECK-RV64-V-NEXT: slli a0, a0, 3
-; CHECK-RV64-V-NEXT: add a1, a1, a0
-; CHECK-RV64-V-NEXT: slli a0, a0, 1
-; CHECK-RV64-V-NEXT: add a0, a0, a1
-; CHECK-RV64-V-NEXT: add a0, sp, a0
-; CHECK-RV64-V-NEXT: addi a0, a0, 16
-; CHECK-RV64-V-NEXT: vs1r.v v6, (a0) # vscale x 8-byte Folded Spill
-; CHECK-RV64-V-NEXT: csrr a0, vlenb
; CHECK-RV64-V-NEXT: slli a0, a0, 3
; CHECK-RV64-V-NEXT: mv a1, a0
; CHECK-RV64-V-NEXT: slli a0, a0, 1
; CHECK-RV64-V-NEXT: add a0, a0, a1
; CHECK-RV64-V-NEXT: add a0, sp, a0
; CHECK-RV64-V-NEXT: addi a0, a0, 16
-; CHECK-RV64-V-NEXT: vs1r.v v7, (a0) # vscale x 8-byte Folded Spill
-; CHECK-RV64-V-NEXT: csrr a0, vlenb
-; CHECK-RV64-V-NEXT: mv a1, a0
-; CHECK-RV64-V-NEXT: slli a0, a0, 1
-; CHECK-RV64-V-NEXT: add a1, a1, a0
-; CHECK-RV64-V-NEXT: slli a0, a0, 1
-; CHECK-RV64-V-NEXT: add a1, a1, a0
-; CHECK-RV64-V-NEXT: slli a0, a0, 2
-; CHECK-RV64-V-NEXT: add a0, a0, a1
-; CHECK-RV64-V-NEXT: add a0, sp, a0
-; CHECK-RV64-V-NEXT: addi a0, a0, 16
-; CHECK-RV64-V-NEXT: vs1r.v v8, (a0) # vscale x 8-byte Folded Spill
-; CHECK-RV64-V-NEXT: csrr a0, vlenb
-; CHECK-RV64-V-NEXT: slli a0, a0, 1
-; CHECK-RV64-V-NEXT: mv a1, a0
-; CHECK-RV64-V-NEXT: slli a0, a0, 1
-; CHECK-RV64-V-NEXT: add a1, a1, a0
-; CHECK-RV64-V-NEXT: slli a0, a0, 2
-; CHECK-RV64-V-NEXT: add a0, a0, a1
-; CHECK-RV64-V-NEXT: add a0, sp, a0
-; CHECK-RV64-V-NEXT: addi a0, a0, 16
-; CHECK-RV64-V-NEXT: vs1r.v v9, (a0) # vscale x 8-byte Folded Spill
-; CHECK-RV64-V-NEXT: csrr a0, vlenb
-; CHECK-RV64-V-NEXT: mv a1, a0
-; CHECK-RV64-V-NEXT: slli a0, a0, 2
-; CHECK-RV64-V-NEXT: add a1, a1, a0
-; CHECK-RV64-V-NEXT: slli a0, a0, 2
-; CHECK-RV64-V-NEXT: add a0, a0, a1
-; CHECK-RV64-V-NEXT: add a0, sp, a0
-; CHECK-RV64-V-NEXT: addi a0, a0, 16
-; CHECK-RV64-V-NEXT: vs1r.v v10, (a0) # vscale x 8-byte Folded Spill
-; CHECK-RV64-V-NEXT: csrr a0, vlenb
-; CHECK-RV64-V-NEXT: slli a0, a0, 2
-; CHECK-RV64-V-NEXT: mv a1, a0
-; CHECK-RV64-V-NEXT: slli a0, a0, 2
-; CHECK-RV64-V-NEXT: add a0, a0, a1
-; CHECK-RV64-V-NEXT: add a0, sp, a0
-; CHECK-RV64-V-NEXT: addi a0, a0, 16
-; CHECK-RV64-V-NEXT: vs1r.v v11, (a0) # vscale x 8-byte Folded Spill
-; CHECK-RV64-V-NEXT: csrr a0, vlenb
-; CHECK-RV64-V-NEXT: mv a1, a0
-; CHECK-RV64-V-NEXT: slli a0, a0, 1
-; CHECK-RV64-V-NEXT: add a1, a1, a0
-; CHECK-RV64-V-NEXT: slli a0, a0, 3
-; CHECK-RV64-V-NEXT: add a0, a0, a1
-; CHECK-RV64-V-NEXT: add a0, sp, a0
-; CHECK-RV64-V-NEXT: addi a0, a0, 16
-; CHECK-RV64-V-NEXT: vs1r.v v12, (a0) # vscale x 8-byte Folded Spill
-; CHECK-RV64-V-NEXT: csrr a0, vlenb
-; CHECK-RV64-V-NEXT: slli a0, a0, 1
-; CHECK-RV64-V-NEXT: mv a1, a0
-; CHECK-RV64-V-NEXT: slli a0, a0, 3
-; CHECK-RV64-V-NEXT: add a0, a0, a1
-; CHECK-RV64-V-NEXT: add a0, sp, a0
-; CHECK-RV64-V-NEXT: addi a0, a0, 16
-; CHECK-RV64-V-NEXT: vs1r.v v13, (a0) # vscale x 8-byte Folded Spill
-; CHECK-RV64-V-NEXT: csrr a0, vlenb
-; CHECK-RV64-V-NEXT: slli a1, a0, 4
-; CHECK-RV64-V-NEXT: add a0, a1, a0
-; CHECK-RV64-V-NEXT: add a0, sp, a0
-; CHECK-RV64-V-NEXT: addi a0, a0, 16
-; CHECK-RV64-V-NEXT: vs1r.v v14, (a0) # vscale x 8-byte Folded Spill
+; CHECK-RV64-V-NEXT: vs8r.v v0, (a0) # vscale x 64-byte Folded Spill
; CHECK-RV64-V-NEXT: csrr a0, vlenb
; CHECK-RV64-V-NEXT: slli a0, a0, 4
; CHECK-RV64-V-NEXT: add a0, sp, a0
; CHECK-RV64-V-NEXT: addi a0, a0, 16
-; CHECK-RV64-V-NEXT: vs1r.v v15, (a0) # vscale x 8-byte Folded Spill
-; CHECK-RV64-V-NEXT: csrr a0, vlenb
-; CHECK-RV64-V-NEXT: slli a1, a0, 4
-; CHECK-RV64-V-NEXT: sub a0, a1, a0
-; CHECK-RV64-V-NEXT: add a0, sp, a0
-; CHECK-RV64-V-NEXT: addi a0, a0, 16
-; CHECK-RV64-V-NEXT: vs1r.v v16, (a0) # vscale x 8-byte Folded Spill
-; CHECK-RV64-V-NEXT: csrr a0, vlenb
-; CHECK-RV64-V-NEXT: slli a0, a0, 1
-; CHECK-RV64-V-NEXT: mv a1, a0
-; CHECK-RV64-V-NEXT: slli a0, a0, 1
-; CHECK-RV64-V-NEXT: add a1, a1, a0
-; CHECK-RV64-V-NEXT: slli a0, a0, 1
-; CHECK-RV64-V-NEXT: add a0, a0, a1
-; CHECK-RV64-V-NEXT: add a0, sp, a0
-; CHECK-RV64-V-NEXT: addi a0, a0, 16
-; CHECK-RV64-V-NEXT: vs1r.v v17, (a0) # vscale x 8-byte Folded Spill
-; CHECK-RV64-V-NEXT: csrr a0, vlenb
-; CHECK-RV64-V-NEXT: mv a1, a0
-; CHECK-RV64-V-NEXT: slli a0, a0, 2
-; CHECK-RV64-V-NEXT: add a1, a1, a0
-; CHECK-RV64-V-NEXT: slli a0, a0, 1
-; CHECK-RV64-V-NEXT: add a0, a0, a1
-; CHECK-RV64-V-NEXT: add a0, sp, a0
-; CHECK-RV64-V-NEXT: addi a0, a0, 16
-; CHECK-RV64-V-NEXT: vs1r.v v18, (a0) # vscale x 8-byte Folded Spill
-; CHECK-RV64-V-NEXT: csrr a0, vlenb
-; CHECK-RV64-V-NEXT: slli a0, a0, 2
-; CHECK-RV64-V-NEXT: mv a1, a0
-; CHECK-RV64-V-NEXT: slli a0, a0, 1
-; CHECK-RV64-V-NEXT: add a0, a0, a1
-; CHECK-RV64-V-NEXT: add a0, sp, a0
-; CHECK-RV64-V-NEXT: addi a0, a0, 16
-; CHECK-RV64-V-NEXT: vs1r.v v19, (a0) # vscale x 8-byte Folded Spill
-; CHECK-RV64-V-NEXT: csrr a0, vlenb
-; CHECK-RV64-V-NEXT: mv a1, a0
-; CHECK-RV64-V-NEXT: slli a0, a0, 1
-; CHECK-RV64-V-NEXT: add a1, a1, a0
-; CHECK-RV64-V-NEXT: slli a0, a0, 2
-; CHECK-RV64-V-NEXT: add a0, a0, a1
-; CHECK-RV64-V-NEXT: add a0, sp, a0
-; CHECK-RV64-V-NEXT: addi a0, a0, 16
-; CHECK-RV64-V-NEXT: vs1r.v v20, (a0) # vscale x 8-byte Folded Spill
-; CHECK-RV64-V-NEXT: csrr a0, vlenb
-; CHECK-RV64-V-NEXT: slli a0, a0, 1
-; CHECK-RV64-V-NEXT: mv a1, a0
-; CHECK-RV64-V-NEXT: slli a0, a0, 2
-; CHECK-RV64-V-NEXT: add a0, a0, a1
-; CHECK-RV64-V-NEXT: add a0, sp, a0
-; CHECK-RV64-V-NEXT: addi a0, a0, 16
-; CHECK-RV64-V-NEXT: vs1r.v v21, (a0) # vscale x 8-byte Folded Spill
-; CHECK-RV64-V-NEXT: csrr a0, vlenb
-; CHECK-RV64-V-NEXT: slli a1, a0, 3
-; CHECK-RV64-V-NEXT: add a0, a1, a0
-; CHECK-RV64-V-NEXT: add a0, sp, a0
-; CHECK-RV64-V-NEXT: addi a0, a0, 16
-; CHECK-RV64-V-NEXT: vs1r.v v22, (a0) # vscale x 8-byte Folded Spill
+; CHECK-RV64-V-NEXT: vs8r.v v8, (a0) # vscale x 64-byte Folded Spill
; CHECK-RV64-V-NEXT: csrr a0, vlenb
; CHECK-RV64-V-NEXT: slli a0, a0, 3
; CHECK-RV64-V-NEXT: add a0, sp, a0
; CHECK-RV64-V-NEXT: addi a0, a0, 16
-; CHECK-RV64-V-NEXT: vs1r.v v23, (a0) # vscale x 8-byte Folded Spill
-; CHECK-RV64-V-NEXT: csrr a0, vlenb
-; CHECK-RV64-V-NEXT: slli a1, a0, 3
-; CHECK-RV64-V-NEXT: sub a0, a1, a0
-; CHECK-RV64-V-NEXT: add a0, sp, a0
-; CHECK-RV64-V-NEXT: addi a0, a0, 16
-; CHECK-RV64-V-NEXT: vs1r.v v24, (a0) # vscale x 8-byte Folded Spill
-; CHECK-RV64-V-NEXT: csrr a0, vlenb
-; CHECK-RV64-V-NEXT: slli a0, a0, 1
-; CHECK-RV64-V-NEXT: mv a1, a0
-; CHECK-RV64-V-NEXT: slli a0, a0, 1
-; CHECK-RV64-V-NEXT: add a0, a0, a1
-; CHECK-RV64-V-NEXT: add a0, sp, a0
-; CHECK-RV64-V-NEXT: addi a0, a0, 16
-; CHECK-RV64-V-NEXT: vs1r.v v25, (a0) # vscale x 8-byte Folded Spill
-; CHECK-RV64-V-NEXT: csrr a0, vlenb
-; CHECK-RV64-V-NEXT: slli a1, a0, 2
-; CHECK-RV64-V-NEXT: add a0, a1, a0
-; CHECK-RV64-V-NEXT: add a0, sp, a0
-; CHECK-RV64-V-NEXT: addi a0, a0, 16
-; CHECK-RV64-V-NEXT: vs1r.v v26, (a0) # vscale x 8-byte Folded Spill
-; CHECK-RV64-V-NEXT: csrr a0, vlenb
-; CHECK-RV64-V-NEXT: slli a0, a0, 2
-; CHECK-RV64-V-NEXT: add a0, sp, a0
-; CHECK-RV64-V-NEXT: addi a0, a0, 16
-; CHECK-RV64-V-NEXT: vs1r.v v27, (a0) # vscale x 8-byte Folded Spill
-; CHECK-RV64-V-NEXT: csrr a0, vlenb
-; CHECK-RV64-V-NEXT: slli a1, a0, 1
-; CHECK-RV64-V-NEXT: add a0, a1, a0
-; CHECK-RV64-V-NEXT: add a0, sp, a0
-; CHECK-RV64-V-NEXT: addi a0, a0, 16
-; CHECK-RV64-V-NEXT: vs1r.v v28, (a0) # vscale x 8-byte Folded Spill
-; CHECK-RV64-V-NEXT: csrr a0, vlenb
-; CHECK-RV64-V-NEXT: slli a0, a0, 1
-; CHECK-RV64-V-NEXT: add a0, sp, a0
-; CHECK-RV64-V-NEXT: addi a0, a0, 16
-; CHECK-RV64-V-NEXT: vs1r.v v29, (a0) # vscale x 8-byte Folded Spill
-; CHECK-RV64-V-NEXT: csrr a0, vlenb
-; CHECK-RV64-V-NEXT: add a0, sp, a0
-; CHECK-RV64-V-NEXT: addi a0, a0, 16
-; CHECK-RV64-V-NEXT: vs1r.v v30, (a0) # vscale x 8-byte Folded Spill
+; CHECK-RV64-V-NEXT: vs8r.v v16, (a0) # vscale x 64-byte Folded Spill
; CHECK-RV64-V-NEXT: addi a0, sp, 16
-; CHECK-RV64-V-NEXT: vs1r.v v31, (a0) # vscale x 8-byte Folded Spill
+; CHECK-RV64-V-NEXT: vs8r.v v24, (a0) # vscale x 64-byte Folded Spill
; CHECK-RV64-V-NEXT: call otherfoo
; CHECK-RV64-V-NEXT: csrr a0, vlenb
-; CHECK-RV64-V-NEXT: slli a1, a0, 5
-; CHECK-RV64-V-NEXT: sub a0, a1, a0
-; CHECK-RV64-V-NEXT: add a0, sp, a0
-; CHECK-RV64-V-NEXT: addi a0, a0, 16
-; CHECK-RV64-V-NEXT: vl1r.v v0, (a0) # vscale x 8-byte Folded Reload
-; CHECK-RV64-V-NEXT: csrr a0, vlenb
-; CHECK-RV64-V-NEXT: slli a0, a0, 1
-; CHECK-RV64-V-NEXT: mv a1, a0
-; CHECK-RV64-V-NEXT: slli a0, a0, 1
-; CHECK-RV64-V-NEXT: add a1, a1, a0
-; CHECK-RV64-V-NEXT: slli a0, a0, 1
-; CHECK-RV64-V-NEXT: add a1, a1, a0
-; CHECK-RV64-V-NEXT: slli a0, a0, 1
-; CHECK-RV64-V-NEXT: add a0, a0, a1
-; CHECK-RV64-V-NEXT: add a0, sp, a0
-; CHECK-RV64-V-NEXT: addi a0, a0, 16
-; CHECK-RV64-V-NEXT: vl1r.v v1, (a0) # vscale x 8-byte Folded Reload
-; CHECK-RV64-V-NEXT: csrr a0, vlenb
-; CHECK-RV64-V-NEXT: mv a1, a0
-; CHECK-RV64-V-NEXT: slli a0, a0, 2
-; CHECK-RV64-V-NEXT: add a1, a1, a0
-; CHECK-RV64-V-NEXT: slli a0, a0, 1
-; CHECK-RV64-V-NEXT: add a1, a1, a0
-; CHECK-RV64-V-NEXT: slli a0, a0, 1
-; CHECK-RV64-V-NEXT: add a0, a0, a1
-; CHECK-RV64-V-NEXT: add a0, sp, a0
-; CHECK-RV64-V-NEXT: addi a0, a0, 16
-; CHECK-RV64-V-NEXT: vl1r.v v2, (a0) # vscale x 8-byte Folded Reload
-; CHECK-RV64-V-NEXT: csrr a0, vlenb
-; CHECK-RV64-V-NEXT: slli a0, a0, 2
-; CHECK-RV64-V-NEXT: mv a1, a0
-; CHECK-RV64-V-NEXT: slli a0, a0, 1
-; CHECK-RV64-V-NEXT: add a1, a1, a0
-; CHECK-RV64-V-NEXT: slli a0, a0, 1
-; CHECK-RV64-V-NEXT: add a0, a0, a1
-; CHECK-RV64-V-NEXT: add a0, sp, a0
-; CHECK-RV64-V-NEXT: addi a0, a0, 16
-; CHECK-RV64-V-NEXT: vl1r.v v3, (a0) # vscale x 8-byte Folded Reload
-; CHECK-RV64-V-NEXT: csrr a0, vlenb
-; CHECK-RV64-V-NEXT: mv a1, a0
-; CHECK-RV64-V-NEXT: slli a0, a0, 1
-; CHECK-RV64-V-NEXT: add a1, a1, a0
-; CHECK-RV64-V-NEXT: slli a0, a0, 2
-; CHECK-RV64-V-NEXT: add a1, a1, a0
-; CHECK-RV64-V-NEXT: slli a0, a0, 1
-; CHECK-RV64-V-NEXT: add a0, a0, a1
-; CHECK-RV64-V-NEXT: add a0, sp, a0
-; CHECK-RV64-V-NEXT: addi a0, a0, 16
-; CHECK-RV64-V-NEXT: vl1r.v v4, (a0) # vscale x 8-byte Folded Reload
-; CHECK-RV64-V-NEXT: csrr a0, vlenb
-; CHECK-RV64-V-NEXT: slli a0, a0, 1
-; CHECK-RV64-V-NEXT: mv a1, a0
-; CHECK-RV64-V-NEXT: slli a0, a0, 2
-; CHECK-RV64-V-NEXT: add a1, a1, a0
-; CHECK-RV64-V-NEXT: slli a0, a0, 1
-; CHECK-RV64-V-NEXT: add a0, a0, a1
-; CHECK-RV64-V-NEXT: add a0, sp, a0
-; CHECK-RV64-V-NEXT: addi a0, a0, 16
-; CHECK-RV64-V-NEXT: vl1r.v v5, (a0) # vscale x 8-byte Folded Reload
-; CHECK-RV64-V-NEXT: csrr a0, vlenb
-; CHECK-RV64-V-NEXT: mv a1, a0
; CHECK-RV64-V-NEXT: slli a0, a0, 3
-; CHECK-RV64-V-NEXT: add a1, a1, a0
-; CHECK-RV64-V-NEXT: slli a0, a0, 1
-; CHECK-RV64-V-NEXT: add a0, a0, a1
-; CHECK-RV64-V-NEXT: add a0, sp, a0
-; CHECK-RV64-V-NEXT: addi a0, a0, 16
-; CHECK-RV64-V-NEXT: vl1r.v v6, (a0) # vscale x 8-byte Folded Reload
-; CHECK-RV64-V-NEXT: csrr a0, vlenb
-; CHECK-RV64-V-NEXT: slli a0, a0, 3
-; CHECK-RV64-V-NEXT: mv a1, a0
-; CHECK-RV64-V-NEXT: slli a0, a0, 1
-; CHECK-RV64-V-NEXT: add a0, a0, a1
-; CHECK-RV64-V-NEXT: add a0, sp, a0
-; CHECK-RV64-V-NEXT: addi a0, a0, 16
-; CHECK-RV64-V-NEXT: vl1r.v v7, (a0) # vscale x 8-byte Folded Reload
-; CHECK-RV64-V-NEXT: csrr a0, vlenb
-; CHECK-RV64-V-NEXT: mv a1, a0
-; CHECK-RV64-V-NEXT: slli a0, a0, 1
-; CHECK-RV64-V-NEXT: add a1, a1, a0
-; CHECK-RV64-V-NEXT: slli a0, a0, 1
-; CHECK-RV64-V-NEXT: add a1, a1, a0
-; CHECK-RV64-V-NEXT: slli a0, a0, 2
-; CHECK-RV64-V-NEXT: add a0, a0, a1
-; CHECK-RV64-V-NEXT: add a0, sp, a0
-; CHECK-RV64-V-NEXT: addi a0, a0, 16
-; CHECK-RV64-V-NEXT: vl1r.v v8, (a0) # vscale x 8-byte Folded Reload
-; CHECK-RV64-V-NEXT: csrr a0, vlenb
-; CHECK-RV64-V-NEXT: slli a0, a0, 1
-; CHECK-RV64-V-NEXT: mv a1, a0
-; CHECK-RV64-V-NEXT: slli a0, a0, 1
-; CHECK-RV64-V-NEXT: add a1, a1, a0
-; CHECK-RV64-V-NEXT: slli a0, a0, 2
-; CHECK-RV64-V-NEXT: add a0, a0, a1
-; CHECK-RV64-V-NEXT: add a0, sp, a0
-; CHECK-RV64-V-NEXT: addi a0, a0, 16
-; CHECK-RV64-V-NEXT: vl1r.v v9, (a0) # vscale x 8-byte Folded Reload
-; CHECK-RV64-V-NEXT: csrr a0, vlenb
-; CHECK-RV64-V-NEXT: mv a1, a0
-; CHECK-RV64-V-NEXT: slli a0, a0, 2
-; CHECK-RV64-V-NEXT: add a1, a1, a0
-; CHECK-RV64-V-NEXT: slli a0, a0, 2
-; CHECK-RV64-V-NEXT: add a0, a0, a1
-; CHECK-RV64-V-NEXT: add a0, sp, a0
-; CHECK-RV64-V-NEXT: addi a0, a0, 16
-; CHECK-RV64-V-NEXT: vl1r.v v10, (a0) # vscale x 8-byte Folded Reload
-; CHECK-RV64-V-NEXT: csrr a0, vlenb
-; CHECK-RV64-V-NEXT: slli a0, a0, 2
-; CHECK-RV64-V-NEXT: mv a1, a0
-; CHECK-RV64-V-NEXT: slli a0, a0, 2
-; CHECK-RV64-V-NEXT: add a0, a0, a1
-; CHECK-RV64-V-NEXT: add a0, sp, a0
-; CHECK-RV64-V-NEXT: addi a0, a0, 16
-; CHECK-RV64-V-NEXT: vl1r.v v11, (a0) # vscale x 8-byte Folded Reload
-; CHECK-RV64-V-NEXT: csrr a0, vlenb
; CHECK-RV64-V-NEXT: mv a1, a0
; CHECK-RV64-V-NEXT: slli a0, a0, 1
-; CHECK-RV64-V-NEXT: add a1, a1, a0
-; CHECK-RV64-V-NEXT: slli a0, a0, 3
; CHECK-RV64-V-NEXT: add a0, a0, a1
; CHECK-RV64-V-NEXT: add a0, sp, a0
; CHECK-RV64-V-NEXT: addi a0, a0, 16
-; CHECK-RV64-V-NEXT: vl1r.v v12, (a0) # vscale x 8-byte Folded Reload
-; CHECK-RV64-V-NEXT: csrr a0, vlenb
-; CHECK-RV64-V-NEXT: slli a0, a0, 1
-; CHECK-RV64-V-NEXT: mv a1, a0
-; CHECK-RV64-V-NEXT: slli a0, a0, 3
-; CHECK-RV64-V-NEXT: add a0, a0, a1
-; CHECK-RV64-V-NEXT: add a0, sp, a0
-; CHECK-RV64-V-NEXT: addi a0, a0, 16
-; CHECK-RV64-V-NEXT: vl1r.v v13, (a0) # vscale x 8-byte Folded Reload
-; CHECK-RV64-V-NEXT: csrr a0, vlenb
-; CHECK-RV64-V-NEXT: slli a1, a0, 4
-; CHECK-RV64-V-NEXT: add a0, a1, a0
-; CHECK-RV64-V-NEXT: add a0, sp, a0
-; CHECK-RV64-V-NEXT: addi a0, a0, 16
-; CHECK-RV64-V-NEXT: vl1r.v v14, (a0) # vscale x 8-byte Folded Reload
+; CHECK-RV64-V-NEXT: vl8r.v v0, (a0) # vscale x 64-byte Folded Reload
; CHECK-RV64-V-NEXT: csrr a0, vlenb
; CHECK-RV64-V-NEXT: slli a0, a0, 4
; CHECK-RV64-V-NEXT: add a0, sp, a0
; CHECK-RV64-V-NEXT: addi a0, a0, 16
-; CHECK-RV64-V-NEXT: vl1r.v v15, (a0) # vscale x 8-byte Folded Reload
-; CHECK-RV64-V-NEXT: csrr a0, vlenb
-; CHECK-RV64-V-NEXT: slli a1, a0, 4
-; CHECK-RV64-V-NEXT: sub a0, a1, a0
-; CHECK-RV64-V-NEXT: add a0, sp, a0
-; CHECK-RV64-V-NEXT: addi a0, a0, 16
-; CHECK-RV64-V-NEXT: vl1r.v v16, (a0) # vscale x 8-byte Folded Reload
-; CHECK-RV64-V-NEXT: csrr a0, vlenb
-; CHECK-RV64-V-NEXT: slli a0, a0, 1
-; CHECK-RV64-V-NEXT: mv a1, a0
-; CHECK-RV64-V-NEXT: slli a0, a0, 1
-; CHECK-RV64-V-NEXT: add a1, a1, a0
-; CHECK-RV64-V-NEXT: slli a0, a0, 1
-; CHECK-RV64-V-NEXT: add a0, a0, a1
-; CHECK-RV64-V-NEXT: add a0, sp, a0
-; CHECK-RV64-V-NEXT: addi a0, a0, 16
-; CHECK-RV64-V-NEXT: vl1r.v v17, (a0) # vscale x 8-byte Folded Reload
-; CHECK-RV64-V-NEXT: csrr a0, vlenb
-; CHECK-RV64-V-NEXT: mv a1, a0
-; CHECK-RV64-V-NEXT: slli a0, a0, 2
-; CHECK-RV64-V-NEXT: add a1, a1, a0
-; CHECK-RV64-V-NEXT: slli a0, a0, 1
-; CHECK-RV64-V-NEXT: add a0, a0, a1
-; CHECK-RV64-V-NEXT: add a0, sp, a0
-; CHECK-RV64-V-NEXT: addi a0, a0, 16
-; CHECK-RV64-V-NEXT: vl1r.v v18, (a0) # vscale x 8-byte Folded Reload
-; CHECK-RV64-V-NEXT: csrr a0, vlenb
-; CHECK-RV64-V-NEXT: slli a0, a0, 2
-; CHECK-RV64-V-NEXT: mv a1, a0
-; CHECK-RV64-V-NEXT: slli a0, a0, 1
-; CHECK-RV64-V-NEXT: add a0, a0, a1
-; CHECK-RV64-V-NEXT: add a0, sp, a0
-; CHECK-RV64-V-NEXT: addi a0, a0, 16
-; CHECK-RV64-V-NEXT: vl1r.v v19, (a0) # vscale x 8-byte Folded Reload
-; CHECK-RV64-V-NEXT: csrr a0, vlenb
-; CHECK-RV64-V-NEXT: mv a1, a0
-; CHECK-RV64-V-NEXT: slli a0, a0, 1
-; CHECK-RV64-V-NEXT: add a1, a1, a0
-; CHECK-RV64-V-NEXT: slli a0, a0, 2
-; CHECK-RV64-V-NEXT: add a0, a0, a1
-; CHECK-RV64-V-NEXT: add a0, sp, a0
-; CHECK-RV64-V-NEXT: addi a0, a0, 16
-; CHECK-RV64-V-NEXT: vl1r.v v20, (a0) # vscale x 8-byte Folded Reload
-; CHECK-RV64-V-NEXT: csrr a0, vlenb
-; CHECK-RV64-V-NEXT: slli a0, a0, 1
-; CHECK-RV64-V-NEXT: mv a1, a0
-; CHECK-RV64-V-NEXT: slli a0, a0, 2
-; CHECK-RV64-V-NEXT: add a0, a0, a1
-; CHECK-RV64-V-NEXT: add a0, sp, a0
-; CHECK-RV64-V-NEXT: addi a0, a0, 16
-; CHECK-RV64-V-NEXT: vl1r.v v21, (a0) # vscale x 8-byte Folded Reload
-; CHECK-RV64-V-NEXT: csrr a0, vlenb
-; CHECK-RV64-V-NEXT: slli a1, a0, 3
-; CHECK-RV64-V-NEXT: add a0, a1, a0
-; CHECK-RV64-V-NEXT: add a0, sp, a0
-; CHECK-RV64-V-NEXT: addi a0, a0, 16
-; CHECK-RV64-V-NEXT: vl1r.v v22, (a0) # vscale x 8-byte Folded Reload
+; CHECK-RV64-V-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
; CHECK-RV64-V-NEXT: csrr a0, vlenb
; CHECK-RV64-V-NEXT: slli a0, a0, 3
; CHECK-RV64-V-NEXT: add a0, sp, a0
; CHECK-RV64-V-NEXT: addi a0, a0, 16
-; CHECK-RV64-V-NEXT: vl1r.v v23, (a0) # vscale x 8-byte Folded Reload
-; CHECK-RV64-V-NEXT: csrr a0, vlenb
-; CHECK-RV64-V-NEXT: slli a1, a0, 3
-; CHECK-RV64-V-NEXT: sub a0, a1, a0
-; CHECK-RV64-V-NEXT: add a0, sp, a0
-; CHECK-RV64-V-NEXT: addi a0, a0, 16
-; CHECK-RV64-V-NEXT: vl1r.v v24, (a0) # vscale x 8-byte Folded Reload
-; CHECK-RV64-V-NEXT: csrr a0, vlenb
-; CHECK-RV64-V-NEXT: slli a0, a0, 1
-; CHECK-RV64-V-NEXT: mv a1, a0
-; CHECK-RV64-V-NEXT: slli a0, a0, 1
-; CHECK-RV64-V-NEXT: add a0, a0, a1
-; CHECK-RV64-V-NEXT: add a0, sp, a0
-; CHECK-RV64-V-NEXT: addi a0, a0, 16
-; CHECK-RV64-V-NEXT: vl1r.v v25, (a0) # vscale x 8-byte Folded Reload
-; CHECK-RV64-V-NEXT: csrr a0, vlenb
-; CHECK-RV64-V-NEXT: slli a1, a0, 2
-; CHECK-RV64-V-NEXT: add a0, a1, a0
-; CHECK-RV64-V-NEXT: add a0, sp, a0
-; CHECK-RV64-V-NEXT: addi a0, a0, 16
-; CHECK-RV64-V-NEXT: vl1r.v v26, (a0) # vscale x 8-byte Folded Reload
-; CHECK-RV64-V-NEXT: csrr a0, vlenb
-; CHECK-RV64-V-NEXT: slli a0, a0, 2
-; CHECK-RV64-V-NEXT: add a0, sp, a0
-; CHECK-RV64-V-NEXT: addi a0, a0, 16
-; CHECK-RV64-V-NEXT: vl1r.v v27, (a0) # vscale x 8-byte Folded Reload
-; CHECK-RV64-V-NEXT: csrr a0, vlenb
-; CHECK-RV64-V-NEXT: slli a1, a0, 1
-; CHECK-RV64-V-NEXT: add a0, a1, a0
-; CHECK-RV64-V-NEXT: add a0, sp, a0
-; CHECK-RV64-V-NEXT: addi a0, a0, 16
-; CHECK-RV64-V-NEXT: vl1r.v v28, (a0) # vscale x 8-byte Folded Reload
-; CHECK-RV64-V-NEXT: csrr a0, vlenb
-; CHECK-RV64-V-NEXT: slli a0, a0, 1
-; CHECK-RV64-V-NEXT: add a0, sp, a0
-; CHECK-RV64-V-NEXT: addi a0, a0, 16
-; CHECK-RV64-V-NEXT: vl1r.v v29, (a0) # vscale x 8-byte Folded Reload
-; CHECK-RV64-V-NEXT: csrr a0, vlenb
-; CHECK-RV64-V-NEXT: add a0, sp, a0
-; CHECK-RV64-V-NEXT: addi a0, a0, 16
-; CHECK-RV64-V-NEXT: vl1r.v v30, (a0) # vscale x 8-byte Folded Reload
+; CHECK-RV64-V-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
; CHECK-RV64-V-NEXT: addi a0, sp, 16
-; CHECK-RV64-V-NEXT: vl1r.v v31, (a0) # vscale x 8-byte Folded Reload
+; CHECK-RV64-V-NEXT: vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
; CHECK-RV64-V-NEXT: csrr a0, vlenb
; CHECK-RV64-V-NEXT: slli a0, a0, 5
; CHECK-RV64-V-NEXT: add sp, sp, a0
@@ -3816,498 +2008,46 @@ define void @foo_with_call() #1 {
; CHECK-RV64-FV-NEXT: slli a0, a0, 5
; CHECK-RV64-FV-NEXT: sub sp, sp, a0
; CHECK-RV64-FV-NEXT: csrr a0, vlenb
-; CHECK-RV64-FV-NEXT: slli a1, a0, 5
-; CHECK-RV64-FV-NEXT: sub a0, a1, a0
-; CHECK-RV64-FV-NEXT: add a0, sp, a0
-; CHECK-RV64-FV-NEXT: addi a0, a0, 16
-; CHECK-RV64-FV-NEXT: vs1r.v v0, (a0) # vscale x 8-byte Folded Spill
-; CHECK-RV64-FV-NEXT: csrr a0, vlenb
-; CHECK-RV64-FV-NEXT: slli a0, a0, 1
-; CHECK-RV64-FV-NEXT: mv a1, a0
-; CHECK-RV64-FV-NEXT: slli a0, a0, 1
-; CHECK-RV64-FV-NEXT: add a1, a1, a0
-; CHECK-RV64-FV-NEXT: slli a0, a0, 1
-; CHECK-RV64-FV-NEXT: add a1, a1, a0
-; CHECK-RV64-FV-NEXT: slli a0, a0, 1
-; CHECK-RV64-FV-NEXT: add a0, a0, a1
-; CHECK-RV64-FV-NEXT: add a0, sp, a0
-; CHECK-RV64-FV-NEXT: addi a0, a0, 16
-; CHECK-RV64-FV-NEXT: vs1r.v v1, (a0) # vscale x 8-byte Folded Spill
-; CHECK-RV64-FV-NEXT: csrr a0, vlenb
-; CHECK-RV64-FV-NEXT: mv a1, a0
-; CHECK-RV64-FV-NEXT: slli a0, a0, 2
-; CHECK-RV64-FV-NEXT: add a1, a1, a0
-; CHECK-RV64-FV-NEXT: slli a0, a0, 1
-; CHECK-RV64-FV-NEXT: add a1, a1, a0
-; CHECK-RV64-FV-NEXT: slli a0, a0, 1
-; CHECK-RV64-FV-NEXT: add a0, a0, a1
-; CHECK-RV64-FV-NEXT: add a0, sp, a0
-; CHECK-RV64-FV-NEXT: addi a0, a0, 16
-; CHECK-RV64-FV-NEXT: vs1r.v v2, (a0) # vscale x 8-byte Folded Spill
-; CHECK-RV64-FV-NEXT: csrr a0, vlenb
-; CHECK-RV64-FV-NEXT: slli a0, a0, 2
-; CHECK-RV64-FV-NEXT: mv a1, a0
-; CHECK-RV64-FV-NEXT: slli a0, a0, 1
-; CHECK-RV64-FV-NEXT: add a1, a1, a0
-; CHECK-RV64-FV-NEXT: slli a0, a0, 1
-; CHECK-RV64-FV-NEXT: add a0, a0, a1
-; CHECK-RV64-FV-NEXT: add a0, sp, a0
-; CHECK-RV64-FV-NEXT: addi a0, a0, 16
-; CHECK-RV64-FV-NEXT: vs1r.v v3, (a0) # vscale x 8-byte Folded Spill
-; CHECK-RV64-FV-NEXT: csrr a0, vlenb
-; CHECK-RV64-FV-NEXT: mv a1, a0
-; CHECK-RV64-FV-NEXT: slli a0, a0, 1
-; CHECK-RV64-FV-NEXT: add a1, a1, a0
-; CHECK-RV64-FV-NEXT: slli a0, a0, 2
-; CHECK-RV64-FV-NEXT: add a1, a1, a0
-; CHECK-RV64-FV-NEXT: slli a0, a0, 1
-; CHECK-RV64-FV-NEXT: add a0, a0, a1
-; CHECK-RV64-FV-NEXT: add a0, sp, a0
-; CHECK-RV64-FV-NEXT: addi a0, a0, 16
-; CHECK-RV64-FV-NEXT: vs1r.v v4, (a0) # vscale x 8-byte Folded Spill
-; CHECK-RV64-FV-NEXT: csrr a0, vlenb
-; CHECK-RV64-FV-NEXT: slli a0, a0, 1
-; CHECK-RV64-FV-NEXT: mv a1, a0
-; CHECK-RV64-FV-NEXT: slli a0, a0, 2
-; CHECK-RV64-FV-NEXT: add a1, a1, a0
-; CHECK-RV64-FV-NEXT: slli a0, a0, 1
-; CHECK-RV64-FV-NEXT: add a0, a0, a1
-; CHECK-RV64-FV-NEXT: add a0, sp, a0
-; CHECK-RV64-FV-NEXT: addi a0, a0, 16
-; CHECK-RV64-FV-NEXT: vs1r.v v5, (a0) # vscale x 8-byte Folded Spill
-; CHECK-RV64-FV-NEXT: csrr a0, vlenb
-; CHECK-RV64-FV-NEXT: mv a1, a0
-; CHECK-RV64-FV-NEXT: slli a0, a0, 3
-; CHECK-RV64-FV-NEXT: add a1, a1, a0
-; CHECK-RV64-FV-NEXT: slli a0, a0, 1
-; CHECK-RV64-FV-NEXT: add a0, a0, a1
-; CHECK-RV64-FV-NEXT: add a0, sp, a0
-; CHECK-RV64-FV-NEXT: addi a0, a0, 16
-; CHECK-RV64-FV-NEXT: vs1r.v v6, (a0) # vscale x 8-byte Folded Spill
-; CHECK-RV64-FV-NEXT: csrr a0, vlenb
; CHECK-RV64-FV-NEXT: slli a0, a0, 3
; CHECK-RV64-FV-NEXT: mv a1, a0
; CHECK-RV64-FV-NEXT: slli a0, a0, 1
; CHECK-RV64-FV-NEXT: add a0, a0, a1
; CHECK-RV64-FV-NEXT: add a0, sp, a0
; CHECK-RV64-FV-NEXT: addi a0, a0, 16
-; CHECK-RV64-FV-NEXT: vs1r.v v7, (a0) # vscale x 8-byte Folded Spill
-; CHECK-RV64-FV-NEXT: csrr a0, vlenb
-; CHECK-RV64-FV-NEXT: mv a1, a0
-; CHECK-RV64-FV-NEXT: slli a0, a0, 1
-; CHECK-RV64-FV-NEXT: add a1, a1, a0
-; CHECK-RV64-FV-NEXT: slli a0, a0, 1
-; CHECK-RV64-FV-NEXT: add a1, a1, a0
-; CHECK-RV64-FV-NEXT: slli a0, a0, 2
-; CHECK-RV64-FV-NEXT: add a0, a0, a1
-; CHECK-RV64-FV-NEXT: add a0, sp, a0
-; CHECK-RV64-FV-NEXT: addi a0, a0, 16
-; CHECK-RV64-FV-NEXT: vs1r.v v8, (a0) # vscale x 8-byte Folded Spill
-; CHECK-RV64-FV-NEXT: csrr a0, vlenb
-; CHECK-RV64-FV-NEXT: slli a0, a0, 1
-; CHECK-RV64-FV-NEXT: mv a1, a0
-; CHECK-RV64-FV-NEXT: slli a0, a0, 1
-; CHECK-RV64-FV-NEXT: add a1, a1, a0
-; CHECK-RV64-FV-NEXT: slli a0, a0, 2
-; CHECK-RV64-FV-NEXT: add a0, a0, a1
-; CHECK-RV64-FV-NEXT: add a0, sp, a0
-; CHECK-RV64-FV-NEXT: addi a0, a0, 16
-; CHECK-RV64-FV-NEXT: vs1r.v v9, (a0) # vscale x 8-byte Folded Spill
-; CHECK-RV64-FV-NEXT: csrr a0, vlenb
-; CHECK-RV64-FV-NEXT: mv a1, a0
-; CHECK-RV64-FV-NEXT: slli a0, a0, 2
-; CHECK-RV64-FV-NEXT: add a1, a1, a0
-; CHECK-RV64-FV-NEXT: slli a0, a0, 2
-; CHECK-RV64-FV-NEXT: add a0, a0, a1
-; CHECK-RV64-FV-NEXT: add a0, sp, a0
-; CHECK-RV64-FV-NEXT: addi a0, a0, 16
-; CHECK-RV64-FV-NEXT: vs1r.v v10, (a0) # vscale x 8-byte Folded Spill
-; CHECK-RV64-FV-NEXT: csrr a0, vlenb
-; CHECK-RV64-FV-NEXT: slli a0, a0, 2
-; CHECK-RV64-FV-NEXT: mv a1, a0
-; CHECK-RV64-FV-NEXT: slli a0, a0, 2
-; CHECK-RV64-FV-NEXT: add a0, a0, a1
-; CHECK-RV64-FV-NEXT: add a0, sp, a0
-; CHECK-RV64-FV-NEXT: addi a0, a0, 16
-; CHECK-RV64-FV-NEXT: vs1r.v v11, (a0) # vscale x 8-byte Folded Spill
-; CHECK-RV64-FV-NEXT: csrr a0, vlenb
-; CHECK-RV64-FV-NEXT: mv a1, a0
-; CHECK-RV64-FV-NEXT: slli a0, a0, 1
-; CHECK-RV64-FV-NEXT: add a1, a1, a0
-; CHECK-RV64-FV-NEXT: slli a0, a0, 3
-; CHECK-RV64-FV-NEXT: add a0, a0, a1
-; CHECK-RV64-FV-NEXT: add a0, sp, a0
-; CHECK-RV64-FV-NEXT: addi a0, a0, 16
-; CHECK-RV64-FV-NEXT: vs1r.v v12, (a0) # vscale x 8-byte Folded Spill
-; CHECK-RV64-FV-NEXT: csrr a0, vlenb
-; CHECK-RV64-FV-NEXT: slli a0, a0, 1
-; CHECK-RV64-FV-NEXT: mv a1, a0
-; CHECK-RV64-FV-NEXT: slli a0, a0, 3
-; CHECK-RV64-FV-NEXT: add a0, a0, a1
-; CHECK-RV64-FV-NEXT: add a0, sp, a0
-; CHECK-RV64-FV-NEXT: addi a0, a0, 16
-; CHECK-RV64-FV-NEXT: vs1r.v v13, (a0) # vscale x 8-byte Folded Spill
-; CHECK-RV64-FV-NEXT: csrr a0, vlenb
-; CHECK-RV64-FV-NEXT: slli a1, a0, 4
-; CHECK-RV64-FV-NEXT: add a0, a1, a0
-; CHECK-RV64-FV-NEXT: add a0, sp, a0
-; CHECK-RV64-FV-NEXT: addi a0, a0, 16
-; CHECK-RV64-FV-NEXT: vs1r.v v14, (a0) # vscale x 8-byte Folded Spill
+; CHECK-RV64-FV-NEXT: vs8r.v v0, (a0) # vscale x 64-byte Folded Spill
; CHECK-RV64-FV-NEXT: csrr a0, vlenb
; CHECK-RV64-FV-NEXT: slli a0, a0, 4
; CHECK-RV64-FV-NEXT: add a0, sp, a0
; CHECK-RV64-FV-NEXT: addi a0, a0, 16
-; CHECK-RV64-FV-NEXT: vs1r.v v15, (a0) # vscale x 8-byte Folded Spill
-; CHECK-RV64-FV-NEXT: csrr a0, vlenb
-; CHECK-RV64-FV-NEXT: slli a1, a0, 4
-; CHECK-RV64-FV-NEXT: sub a0, a1, a0
-; CHECK-RV64-FV-NEXT: add a0, sp, a0
-; CHECK-RV64-FV-NEXT: addi a0, a0, 16
-; CHECK-RV64-FV-NEXT: vs1r.v v16, (a0) # vscale x 8-byte Folded Spill
-; CHECK-RV64-FV-NEXT: csrr a0, vlenb
-; CHECK-RV64-FV-NEXT: slli a0, a0, 1
-; CHECK-RV64-FV-NEXT: mv a1, a0
-; CHECK-RV64-FV-NEXT: slli a0, a0, 1
-; CHECK-RV64-FV-NEXT: add a1, a1, a0
-; CHECK-RV64-FV-NEXT: slli a0, a0, 1
-; CHECK-RV64-FV-NEXT: add a0, a0, a1
-; CHECK-RV64-FV-NEXT: add a0, sp, a0
-; CHECK-RV64-FV-NEXT: addi a0, a0, 16
-; CHECK-RV64-FV-NEXT: vs1r.v v17, (a0) # vscale x 8-byte Folded Spill
-; CHECK-RV64-FV-NEXT: csrr a0, vlenb
-; CHECK-RV64-FV-NEXT: mv a1, a0
-; CHECK-RV64-FV-NEXT: slli a0, a0, 2
-; CHECK-RV64-FV-NEXT: add a1, a1, a0
-; CHECK-RV64-FV-NEXT: slli a0, a0, 1
-; CHECK-RV64-FV-NEXT: add a0, a0, a1
-; CHECK-RV64-FV-NEXT: add a0, sp, a0
-; CHECK-RV64-FV-NEXT: addi a0, a0, 16
-; CHECK-RV64-FV-NEXT: vs1r.v v18, (a0) # vscale x 8-byte Folded Spill
-; CHECK-RV64-FV-NEXT: csrr a0, vlenb
-; CHECK-RV64-FV-NEXT: slli a0, a0, 2
-; CHECK-RV64-FV-NEXT: mv a1, a0
-; CHECK-RV64-FV-NEXT: slli a0, a0, 1
-; CHECK-RV64-FV-NEXT: add a0, a0, a1
-; CHECK-RV64-FV-NEXT: add a0, sp, a0
-; CHECK-RV64-FV-NEXT: addi a0, a0, 16
-; CHECK-RV64-FV-NEXT: vs1r.v v19, (a0) # vscale x 8-byte Folded Spill
-; CHECK-RV64-FV-NEXT: csrr a0, vlenb
-; CHECK-RV64-FV-NEXT: mv a1, a0
-; CHECK-RV64-FV-NEXT: slli a0, a0, 1
-; CHECK-RV64-FV-NEXT: add a1, a1, a0
-; CHECK-RV64-FV-NEXT: slli a0, a0, 2
-; CHECK-RV64-FV-NEXT: add a0, a0, a1
-; CHECK-RV64-FV-NEXT: add a0, sp, a0
-; CHECK-RV64-FV-NEXT: addi a0, a0, 16
-; CHECK-RV64-FV-NEXT: vs1r.v v20, (a0) # vscale x 8-byte Folded Spill
-; CHECK-RV64-FV-NEXT: csrr a0, vlenb
-; CHECK-RV64-FV-NEXT: slli a0, a0, 1
-; CHECK-RV64-FV-NEXT: mv a1, a0
-; CHECK-RV64-FV-NEXT: slli a0, a0, 2
-; CHECK-RV64-FV-NEXT: add a0, a0, a1
-; CHECK-RV64-FV-NEXT: add a0, sp, a0
-; CHECK-RV64-FV-NEXT: addi a0, a0, 16
-; CHECK-RV64-FV-NEXT: vs1r.v v21, (a0) # vscale x 8-byte Folded Spill
-; CHECK-RV64-FV-NEXT: csrr a0, vlenb
-; CHECK-RV64-FV-NEXT: slli a1, a0, 3
-; CHECK-RV64-FV-NEXT: add a0, a1, a0
-; CHECK-RV64-FV-NEXT: add a0, sp, a0
-; CHECK-RV64-FV-NEXT: addi a0, a0, 16
-; CHECK-RV64-FV-NEXT: vs1r.v v22, (a0) # vscale x 8-byte Folded Spill
+; CHECK-RV64-FV-NEXT: vs8r.v v8, (a0) # vscale x 64-byte Folded Spill
; CHECK-RV64-FV-NEXT: csrr a0, vlenb
; CHECK-RV64-FV-NEXT: slli a0, a0, 3
; CHECK-RV64-FV-NEXT: add a0, sp, a0
; CHECK-RV64-FV-NEXT: addi a0, a0, 16
-; CHECK-RV64-FV-NEXT: vs1r.v v23, (a0) # vscale x 8-byte Folded Spill
-; CHECK-RV64-FV-NEXT: csrr a0, vlenb
-; CHECK-RV64-FV-NEXT: slli a1, a0, 3
-; CHECK-RV64-FV-NEXT: sub a0, a1, a0
-; CHECK-RV64-FV-NEXT: add a0, sp, a0
-; CHECK-RV64-FV-NEXT: addi a0, a0, 16
-; CHECK-RV64-FV-NEXT: vs1r.v v24, (a0) # vscale x 8-byte Folded Spill
-; CHECK-RV64-FV-NEXT: csrr a0, vlenb
-; CHECK-RV64-FV-NEXT: slli a0, a0, 1
-; CHECK-RV64-FV-NEXT: mv a1, a0
-; CHECK-RV64-FV-NEXT: slli a0, a0, 1
-; CHECK-RV64-FV-NEXT: add a0, a0, a1
-; CHECK-RV64-FV-NEXT: add a0, sp, a0
-; CHECK-RV64-FV-NEXT: addi a0, a0, 16
-; CHECK-RV64-FV-NEXT: vs1r.v v25, (a0) # vscale x 8-byte Folded Spill
-; CHECK-RV64-FV-NEXT: csrr a0, vlenb
-; CHECK-RV64-FV-NEXT: slli a1, a0, 2
-; CHECK-RV64-FV-NEXT: add a0, a1, a0
-; CHECK-RV64-FV-NEXT: add a0, sp, a0
-; CHECK-RV64-FV-NEXT: addi a0, a0, 16
-; CHECK-RV64-FV-NEXT: vs1r.v v26, (a0) # vscale x 8-byte Folded Spill
-; CHECK-RV64-FV-NEXT: csrr a0, vlenb
-; CHECK-RV64-FV-NEXT: slli a0, a0, 2
-; CHECK-RV64-FV-NEXT: add a0, sp, a0
-; CHECK-RV64-FV-NEXT: addi a0, a0, 16
-; CHECK-RV64-FV-NEXT: vs1r.v v27, (a0) # vscale x 8-byte Folded Spill
-; CHECK-RV64-FV-NEXT: csrr a0, vlenb
-; CHECK-RV64-FV-NEXT: slli a1, a0, 1
-; CHECK-RV64-FV-NEXT: add a0, a1, a0
-; CHECK-RV64-FV-NEXT: add a0, sp, a0
-; CHECK-RV64-FV-NEXT: addi a0, a0, 16
-; CHECK-RV64-FV-NEXT: vs1r.v v28, (a0) # vscale x 8-byte Folded Spill
-; CHECK-RV64-FV-NEXT: csrr a0, vlenb
-; CHECK-RV64-FV-NEXT: slli a0, a0, 1
-; CHECK-RV64-FV-NEXT: add a0, sp, a0
-; CHECK-RV64-FV-NEXT: addi a0, a0, 16
-; CHECK-RV64-FV-NEXT: vs1r.v v29, (a0) # vscale x 8-byte Folded Spill
-; CHECK-RV64-FV-NEXT: csrr a0, vlenb
-; CHECK-RV64-FV-NEXT: add a0, sp, a0
-; CHECK-RV64-FV-NEXT: addi a0, a0, 16
-; CHECK-RV64-FV-NEXT: vs1r.v v30, (a0) # vscale x 8-byte Folded Spill
+; CHECK-RV64-FV-NEXT: vs8r.v v16, (a0) # vscale x 64-byte Folded Spill
; CHECK-RV64-FV-NEXT: addi a0, sp, 16
-; CHECK-RV64-FV-NEXT: vs1r.v v31, (a0) # vscale x 8-byte Folded Spill
+; CHECK-RV64-FV-NEXT: vs8r.v v24, (a0) # vscale x 64-byte Folded Spill
; CHECK-RV64-FV-NEXT: call otherfoo
; CHECK-RV64-FV-NEXT: csrr a0, vlenb
-; CHECK-RV64-FV-NEXT: slli a1, a0, 5
-; CHECK-RV64-FV-NEXT: sub a0, a1, a0
-; CHECK-RV64-FV-NEXT: add a0, sp, a0
-; CHECK-RV64-FV-NEXT: addi a0, a0, 16
-; CHECK-RV64-FV-NEXT: vl1r.v v0, (a0) # vscale x 8-byte Folded Reload
-; CHECK-RV64-FV-NEXT: csrr a0, vlenb
-; CHECK-RV64-FV-NEXT: slli a0, a0, 1
-; CHECK-RV64-FV-NEXT: mv a1, a0
-; CHECK-RV64-FV-NEXT: slli a0, a0, 1
-; CHECK-RV64-FV-NEXT: add a1, a1, a0
-; CHECK-RV64-FV-NEXT: slli a0, a0, 1
-; CHECK-RV64-FV-NEXT: add a1, a1, a0
-; CHECK-RV64-FV-NEXT: slli a0, a0, 1
-; CHECK-RV64-FV-NEXT: add a0, a0, a1
-; CHECK-RV64-FV-NEXT: add a0, sp, a0
-; CHECK-RV64-FV-NEXT: addi a0, a0, 16
-; CHECK-RV64-FV-NEXT: vl1r.v v1, (a0) # vscale x 8-byte Folded Reload
-; CHECK-RV64-FV-NEXT: csrr a0, vlenb
-; CHECK-RV64-FV-NEXT: mv a1, a0
-; CHECK-RV64-FV-NEXT: slli a0, a0, 2
-; CHECK-RV64-FV-NEXT: add a1, a1, a0
-; CHECK-RV64-FV-NEXT: slli a0, a0, 1
-; CHECK-RV64-FV-NEXT: add a1, a1, a0
-; CHECK-RV64-FV-NEXT: slli a0, a0, 1
-; CHECK-RV64-FV-NEXT: add a0, a0, a1
-; CHECK-RV64-FV-NEXT: add a0, sp, a0
-; CHECK-RV64-FV-NEXT: addi a0, a0, 16
-; CHECK-RV64-FV-NEXT: vl1r.v v2, (a0) # vscale x 8-byte Folded Reload
-; CHECK-RV64-FV-NEXT: csrr a0, vlenb
-; CHECK-RV64-FV-NEXT: slli a0, a0, 2
-; CHECK-RV64-FV-NEXT: mv a1, a0
-; CHECK-RV64-FV-NEXT: slli a0, a0, 1
-; CHECK-RV64-FV-NEXT: add a1, a1, a0
-; CHECK-RV64-FV-NEXT: slli a0, a0, 1
-; CHECK-RV64-FV-NEXT: add a0, a0, a1
-; CHECK-RV64-FV-NEXT: add a0, sp, a0
-; CHECK-RV64-FV-NEXT: addi a0, a0, 16
-; CHECK-RV64-FV-NEXT: vl1r.v v3, (a0) # vscale x 8-byte Folded Reload
-; CHECK-RV64-FV-NEXT: csrr a0, vlenb
-; CHECK-RV64-FV-NEXT: mv a1, a0
-; CHECK-RV64-FV-NEXT: slli a0, a0, 1
-; CHECK-RV64-FV-NEXT: add a1, a1, a0
-; CHECK-RV64-FV-NEXT: slli a0, a0, 2
-; CHECK-RV64-FV-NEXT: add a1, a1, a0
-; CHECK-RV64-FV-NEXT: slli a0, a0, 1
-; CHECK-RV64-FV-NEXT: add a0, a0, a1
-; CHECK-RV64-FV-NEXT: add a0, sp, a0
-; CHECK-RV64-FV-NEXT: addi a0, a0, 16
-; CHECK-RV64-FV-NEXT: vl1r.v v4, (a0) # vscale x 8-byte Folded Reload
-; CHECK-RV64-FV-NEXT: csrr a0, vlenb
-; CHECK-RV64-FV-NEXT: slli a0, a0, 1
-; CHECK-RV64-FV-NEXT: mv a1, a0
-; CHECK-RV64-FV-NEXT: slli a0, a0, 2
-; CHECK-RV64-FV-NEXT: add a1, a1, a0
-; CHECK-RV64-FV-NEXT: slli a0, a0, 1
-; CHECK-RV64-FV-NEXT: add a0, a0, a1
-; CHECK-RV64-FV-NEXT: add a0, sp, a0
-; CHECK-RV64-FV-NEXT: addi a0, a0, 16
-; CHECK-RV64-FV-NEXT: vl1r.v v5, (a0) # vscale x 8-byte Folded Reload
-; CHECK-RV64-FV-NEXT: csrr a0, vlenb
-; CHECK-RV64-FV-NEXT: mv a1, a0
-; CHECK-RV64-FV-NEXT: slli a0, a0, 3
-; CHECK-RV64-FV-NEXT: add a1, a1, a0
-; CHECK-RV64-FV-NEXT: slli a0, a0, 1
-; CHECK-RV64-FV-NEXT: add a0, a0, a1
-; CHECK-RV64-FV-NEXT: add a0, sp, a0
-; CHECK-RV64-FV-NEXT: addi a0, a0, 16
-; CHECK-RV64-FV-NEXT: vl1r.v v6, (a0) # vscale x 8-byte Folded Reload
-; CHECK-RV64-FV-NEXT: csrr a0, vlenb
; CHECK-RV64-FV-NEXT: slli a0, a0, 3
; CHECK-RV64-FV-NEXT: mv a1, a0
; CHECK-RV64-FV-NEXT: slli a0, a0, 1
; CHECK-RV64-FV-NEXT: add a0, a0, a1
; CHECK-RV64-FV-NEXT: add a0, sp, a0
; CHECK-RV64-FV-NEXT: addi a0, a0, 16
-; CHECK-RV64-FV-NEXT: vl1r.v v7, (a0) # vscale x 8-byte Folded Reload
-; CHECK-RV64-FV-NEXT: csrr a0, vlenb
-; CHECK-RV64-FV-NEXT: mv a1, a0
-; CHECK-RV64-FV-NEXT: slli a0, a0, 1
-; CHECK-RV64-FV-NEXT: add a1, a1, a0
-; CHECK-RV64-FV-NEXT: slli a0, a0, 1
-; CHECK-RV64-FV-NEXT: add a1, a1, a0
-; CHECK-RV64-FV-NEXT: slli a0, a0, 2
-; CHECK-RV64-FV-NEXT: add a0, a0, a1
-; CHECK-RV64-FV-NEXT: add a0, sp, a0
-; CHECK-RV64-FV-NEXT: addi a0, a0, 16
-; CHECK-RV64-FV-NEXT: vl1r.v v8, (a0) # vscale x 8-byte Folded Reload
-; CHECK-RV64-FV-NEXT: csrr a0, vlenb
-; CHECK-RV64-FV-NEXT: slli a0, a0, 1
-; CHECK-RV64-FV-NEXT: mv a1, a0
-; CHECK-RV64-FV-NEXT: slli a0, a0, 1
-; CHECK-RV64-FV-NEXT: add a1, a1, a0
-; CHECK-RV64-FV-NEXT: slli a0, a0, 2
-; CHECK-RV64-FV-NEXT: add a0, a0, a1
-; CHECK-RV64-FV-NEXT: add a0, sp, a0
-; CHECK-RV64-FV-NEXT: addi a0, a0, 16
-; CHECK-RV64-FV-NEXT: vl1r.v v9, (a0) # vscale x 8-byte Folded Reload
-; CHECK-RV64-FV-NEXT: csrr a0, vlenb
-; CHECK-RV64-FV-NEXT: mv a1, a0
-; CHECK-RV64-FV-NEXT: slli a0, a0, 2
-; CHECK-RV64-FV-NEXT: add a1, a1, a0
-; CHECK-RV64-FV-NEXT: slli a0, a0, 2
-; CHECK-RV64-FV-NEXT: add a0, a0, a1
-; CHECK-RV64-FV-NEXT: add a0, sp, a0
-; CHECK-RV64-FV-NEXT: addi a0, a0, 16
-; CHECK-RV64-FV-NEXT: vl1r.v v10, (a0) # vscale x 8-byte Folded Reload
-; CHECK-RV64-FV-NEXT: csrr a0, vlenb
-; CHECK-RV64-FV-NEXT: slli a0, a0, 2
-; CHECK-RV64-FV-NEXT: mv a1, a0
-; CHECK-RV64-FV-NEXT: slli a0, a0, 2
-; CHECK-RV64-FV-NEXT: add a0, a0, a1
-; CHECK-RV64-FV-NEXT: add a0, sp, a0
-; CHECK-RV64-FV-NEXT: addi a0, a0, 16
-; CHECK-RV64-FV-NEXT: vl1r.v v11, (a0) # vscale x 8-byte Folded Reload
-; CHECK-RV64-FV-NEXT: csrr a0, vlenb
-; CHECK-RV64-FV-NEXT: mv a1, a0
-; CHECK-RV64-FV-NEXT: slli a0, a0, 1
-; CHECK-RV64-FV-NEXT: add a1, a1, a0
-; CHECK-RV64-FV-NEXT: slli a0, a0, 3
-; CHECK-RV64-FV-NEXT: add a0, a0, a1
-; CHECK-RV64-FV-NEXT: add a0, sp, a0
-; CHECK-RV64-FV-NEXT: addi a0, a0, 16
-; CHECK-RV64-FV-NEXT: vl1r.v v12, (a0) # vscale x 8-byte Folded Reload
-; CHECK-RV64-FV-NEXT: csrr a0, vlenb
-; CHECK-RV64-FV-NEXT: slli a0, a0, 1
-; CHECK-RV64-FV-NEXT: mv a1, a0
-; CHECK-RV64-FV-NEXT: slli a0, a0, 3
-; CHECK-RV64-FV-NEXT: add a0, a0, a1
-; CHECK-RV64-FV-NEXT: add a0, sp, a0
-; CHECK-RV64-FV-NEXT: addi a0, a0, 16
-; CHECK-RV64-FV-NEXT: vl1r.v v13, (a0) # vscale x 8-byte Folded Reload
-; CHECK-RV64-FV-NEXT: csrr a0, vlenb
-; CHECK-RV64-FV-NEXT: slli a1, a0, 4
-; CHECK-RV64-FV-NEXT: add a0, a1, a0
-; CHECK-RV64-FV-NEXT: add a0, sp, a0
-; CHECK-RV64-FV-NEXT: addi a0, a0, 16
-; CHECK-RV64-FV-NEXT: vl1r.v v14, (a0) # vscale x 8-byte Folded Reload
+; CHECK-RV64-FV-NEXT: vl8r.v v0, (a0) # vscale x 64-byte Folded Reload
; CHECK-RV64-FV-NEXT: csrr a0, vlenb
; CHECK-RV64-FV-NEXT: slli a0, a0, 4
; CHECK-RV64-FV-NEXT: add a0, sp, a0
; CHECK-RV64-FV-NEXT: addi a0, a0, 16
-; CHECK-RV64-FV-NEXT: vl1r.v v15, (a0) # vscale x 8-byte Folded Reload
-; CHECK-RV64-FV-NEXT: csrr a0, vlenb
-; CHECK-RV64-FV-NEXT: slli a1, a0, 4
-; CHECK-RV64-FV-NEXT: sub a0, a1, a0
-; CHECK-RV64-FV-NEXT: add a0, sp, a0
-; CHECK-RV64-FV-NEXT: addi a0, a0, 16
-; CHECK-RV64-FV-NEXT: vl1r.v v16, (a0) # vscale x 8-byte Folded Reload
-; CHECK-RV64-FV-NEXT: csrr a0, vlenb
-; CHECK-RV64-FV-NEXT: slli a0, a0, 1
-; CHECK-RV64-FV-NEXT: mv a1, a0
-; CHECK-RV64-FV-NEXT: slli a0, a0, 1
-; CHECK-RV64-FV-NEXT: add a1, a1, a0
-; CHECK-RV64-FV-NEXT: slli a0, a0, 1
-; CHECK-RV64-FV-NEXT: add a0, a0, a1
-; CHECK-RV64-FV-NEXT: add a0, sp, a0
-; CHECK-RV64-FV-NEXT: addi a0, a0, 16
-; CHECK-RV64-FV-NEXT: vl1r.v v17, (a0) # vscale x 8-byte Folded Reload
-; CHECK-RV64-FV-NEXT: csrr a0, vlenb
-; CHECK-RV64-FV-NEXT: mv a1, a0
-; CHECK-RV64-FV-NEXT: slli a0, a0, 2
-; CHECK-RV64-FV-NEXT: add a1, a1, a0
-; CHECK-RV64-FV-NEXT: slli a0, a0, 1
-; CHECK-RV64-FV-NEXT: add a0, a0, a1
-; CHECK-RV64-FV-NEXT: add a0, sp, a0
-; CHECK-RV64-FV-NEXT: addi a0, a0, 16
-; CHECK-RV64-FV-NEXT: vl1r.v v18, (a0) # vscale x 8-byte Folded Reload
-; CHECK-RV64-FV-NEXT: csrr a0, vlenb
-; CHECK-RV64-FV-NEXT: slli a0, a0, 2
-; CHECK-RV64-FV-NEXT: mv a1, a0
-; CHECK-RV64-FV-NEXT: slli a0, a0, 1
-; CHECK-RV64-FV-NEXT: add a0, a0, a1
-; CHECK-RV64-FV-NEXT: add a0, sp, a0
-; CHECK-RV64-FV-NEXT: addi a0, a0, 16
-; CHECK-RV64-FV-NEXT: vl1r.v v19, (a0) # vscale x 8-byte Folded Reload
-; CHECK-RV64-FV-NEXT: csrr a0, vlenb
-; CHECK-RV64-FV-NEXT: mv a1, a0
-; CHECK-RV64-FV-NEXT: slli a0, a0, 1
-; CHECK-RV64-FV-NEXT: add a1, a1, a0
-; CHECK-RV64-FV-NEXT: slli a0, a0, 2
-; CHECK-RV64-FV-NEXT: add a0, a0, a1
-; CHECK-RV64-FV-NEXT: add a0, sp, a0
-; CHECK-RV64-FV-NEXT: addi a0, a0, 16
-; CHECK-RV64-FV-NEXT: vl1r.v v20, (a0) # vscale x 8-byte Folded Reload
-; CHECK-RV64-FV-NEXT: csrr a0, vlenb
-; CHECK-RV64-FV-NEXT: slli a0, a0, 1
-; CHECK-RV64-FV-NEXT: mv a1, a0
-; CHECK-RV64-FV-NEXT: slli a0, a0, 2
-; CHECK-RV64-FV-NEXT: add a0, a0, a1
-; CHECK-RV64-FV-NEXT: add a0, sp, a0
-; CHECK-RV64-FV-NEXT: addi a0, a0, 16
-; CHECK-RV64-FV-NEXT: vl1r.v v21, (a0) # vscale x 8-byte Folded Reload
-; CHECK-RV64-FV-NEXT: csrr a0, vlenb
-; CHECK-RV64-FV-NEXT: slli a1, a0, 3
-; CHECK-RV64-FV-NEXT: add a0, a1, a0
-; CHECK-RV64-FV-NEXT: add a0, sp, a0
-; CHECK-RV64-FV-NEXT: addi a0, a0, 16
-; CHECK-RV64-FV-NEXT: vl1r.v v22, (a0) # vscale x 8-byte Folded Reload
+; CHECK-RV64-FV-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
; CHECK-RV64-FV-NEXT: csrr a0, vlenb
; CHECK-RV64-FV-NEXT: slli a0, a0, 3
; CHECK-RV64-FV-NEXT: add a0, sp, a0
; CHECK-RV64-FV-NEXT: addi a0, a0, 16
-; CHECK-RV64-FV-NEXT: vl1r.v v23, (a0) # vscale x 8-byte Folded Reload
-; CHECK-RV64-FV-NEXT: csrr a0, vlenb
-; CHECK-RV64-FV-NEXT: slli a1, a0, 3
-; CHECK-RV64-FV-NEXT: sub a0, a1, a0
-; CHECK-RV64-FV-NEXT: add a0, sp, a0
-; CHECK-RV64-FV-NEXT: addi a0, a0, 16
-; CHECK-RV64-FV-NEXT: vl1r.v v24, (a0) # vscale x 8-byte Folded Reload
-; CHECK-RV64-FV-NEXT: csrr a0, vlenb
-; CHECK-RV64-FV-NEXT: slli a0, a0, 1
-; CHECK-RV64-FV-NEXT: mv a1, a0
-; CHECK-RV64-FV-NEXT: slli a0, a0, 1
-; CHECK-RV64-FV-NEXT: add a0, a0, a1
-; CHECK-RV64-FV-NEXT: add a0, sp, a0
-; CHECK-RV64-FV-NEXT: addi a0, a0, 16
-; CHECK-RV64-FV-NEXT: vl1r.v v25, (a0) # vscale x 8-byte Folded Reload
-; CHECK-RV64-FV-NEXT: csrr a0, vlenb
-; CHECK-RV64-FV-NEXT: slli a1, a0, 2
-; CHECK-RV64-FV-NEXT: add a0, a1, a0
-; CHECK-RV64-FV-NEXT: add a0, sp, a0
-; CHECK-RV64-FV-NEXT: addi a0, a0, 16
-; CHECK-RV64-FV-NEXT: vl1r.v v26, (a0) # vscale x 8-byte Folded Reload
-; CHECK-RV64-FV-NEXT: csrr a0, vlenb
-; CHECK-RV64-FV-NEXT: slli a0, a0, 2
-; CHECK-RV64-FV-NEXT: add a0, sp, a0
-; CHECK-RV64-FV-NEXT: addi a0, a0, 16
-; CHECK-RV64-FV-NEXT: vl1r.v v27, (a0) # vscale x 8-byte Folded Reload
-; CHECK-RV64-FV-NEXT: csrr a0, vlenb
-; CHECK-RV64-FV-NEXT: slli a1, a0, 1
-; CHECK-RV64-FV-NEXT: add a0, a1, a0
-; CHECK-RV64-FV-NEXT: add a0, sp, a0
-; CHECK-RV64-FV-NEXT: addi a0, a0, 16
-; CHECK-RV64-FV-NEXT: vl1r.v v28, (a0) # vscale x 8-byte Folded Reload
-; CHECK-RV64-FV-NEXT: csrr a0, vlenb
-; CHECK-RV64-FV-NEXT: slli a0, a0, 1
-; CHECK-RV64-FV-NEXT: add a0, sp, a0
-; CHECK-RV64-FV-NEXT: addi a0, a0, 16
-; CHECK-RV64-FV-NEXT: vl1r.v v29, (a0) # vscale x 8-byte Folded Reload
-; CHECK-RV64-FV-NEXT: csrr a0, vlenb
-; CHECK-RV64-FV-NEXT: add a0, sp, a0
-; CHECK-RV64-FV-NEXT: addi a0, a0, 16
-; CHECK-RV64-FV-NEXT: vl1r.v v30, (a0) # vscale x 8-byte Folded Reload
+; CHECK-RV64-FV-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
; CHECK-RV64-FV-NEXT: addi a0, sp, 16
-; CHECK-RV64-FV-NEXT: vl1r.v v31, (a0) # vscale x 8-byte Folded Reload
+; CHECK-RV64-FV-NEXT: vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
; CHECK-RV64-FV-NEXT: csrr a0, vlenb
; CHECK-RV64-FV-NEXT: slli a0, a0, 5
; CHECK-RV64-FV-NEXT: add sp, sp, a0
@@ -4393,498 +2133,46 @@ define void @foo_with_call() #1 {
; CHECK-RV64-FDV-NEXT: slli a0, a0, 5
; CHECK-RV64-FDV-NEXT: sub sp, sp, a0
; CHECK-RV64-FDV-NEXT: csrr a0, vlenb
-; CHECK-RV64-FDV-NEXT: slli a1, a0, 5
-; CHECK-RV64-FDV-NEXT: sub a0, a1, a0
-; CHECK-RV64-FDV-NEXT: add a0, sp, a0
-; CHECK-RV64-FDV-NEXT: addi a0, a0, 16
-; CHECK-RV64-FDV-NEXT: vs1r.v v0, (a0) # vscale x 8-byte Folded Spill
-; CHECK-RV64-FDV-NEXT: csrr a0, vlenb
-; CHECK-RV64-FDV-NEXT: slli a0, a0, 1
-; CHECK-RV64-FDV-NEXT: mv a1, a0
-; CHECK-RV64-FDV-NEXT: slli a0, a0, 1
-; CHECK-RV64-FDV-NEXT: add a1, a1, a0
-; CHECK-RV64-FDV-NEXT: slli a0, a0, 1
-; CHECK-RV64-FDV-NEXT: add a1, a1, a0
-; CHECK-RV64-FDV-NEXT: slli a0, a0, 1
-; CHECK-RV64-FDV-NEXT: add a0, a0, a1
-; CHECK-RV64-FDV-NEXT: add a0, sp, a0
-; CHECK-RV64-FDV-NEXT: addi a0, a0, 16
-; CHECK-RV64-FDV-NEXT: vs1r.v v1, (a0) # vscale x 8-byte Folded Spill
-; CHECK-RV64-FDV-NEXT: csrr a0, vlenb
-; CHECK-RV64-FDV-NEXT: mv a1, a0
-; CHECK-RV64-FDV-NEXT: slli a0, a0, 2
-; CHECK-RV64-FDV-NEXT: add a1, a1, a0
-; CHECK-RV64-FDV-NEXT: slli a0, a0, 1
-; CHECK-RV64-FDV-NEXT: add a1, a1, a0
-; CHECK-RV64-FDV-NEXT: slli a0, a0, 1
-; CHECK-RV64-FDV-NEXT: add a0, a0, a1
-; CHECK-RV64-FDV-NEXT: add a0, sp, a0
-; CHECK-RV64-FDV-NEXT: addi a0, a0, 16
-; CHECK-RV64-FDV-NEXT: vs1r.v v2, (a0) # vscale x 8-byte Folded Spill
-; CHECK-RV64-FDV-NEXT: csrr a0, vlenb
-; CHECK-RV64-FDV-NEXT: slli a0, a0, 2
-; CHECK-RV64-FDV-NEXT: mv a1, a0
-; CHECK-RV64-FDV-NEXT: slli a0, a0, 1
-; CHECK-RV64-FDV-NEXT: add a1, a1, a0
-; CHECK-RV64-FDV-NEXT: slli a0, a0, 1
-; CHECK-RV64-FDV-NEXT: add a0, a0, a1
-; CHECK-RV64-FDV-NEXT: add a0, sp, a0
-; CHECK-RV64-FDV-NEXT: addi a0, a0, 16
-; CHECK-RV64-FDV-NEXT: vs1r.v v3, (a0) # vscale x 8-byte Folded Spill
-; CHECK-RV64-FDV-NEXT: csrr a0, vlenb
-; CHECK-RV64-FDV-NEXT: mv a1, a0
-; CHECK-RV64-FDV-NEXT: slli a0, a0, 1
-; CHECK-RV64-FDV-NEXT: add a1, a1, a0
-; CHECK-RV64-FDV-NEXT: slli a0, a0, 2
-; CHECK-RV64-FDV-NEXT: add a1, a1, a0
-; CHECK-RV64-FDV-NEXT: slli a0, a0, 1
-; CHECK-RV64-FDV-NEXT: add a0, a0, a1
-; CHECK-RV64-FDV-NEXT: add a0, sp, a0
-; CHECK-RV64-FDV-NEXT: addi a0, a0, 16
-; CHECK-RV64-FDV-NEXT: vs1r.v v4, (a0) # vscale x 8-byte Folded Spill
-; CHECK-RV64-FDV-NEXT: csrr a0, vlenb
-; CHECK-RV64-FDV-NEXT: slli a0, a0, 1
-; CHECK-RV64-FDV-NEXT: mv a1, a0
-; CHECK-RV64-FDV-NEXT: slli a0, a0, 2
-; CHECK-RV64-FDV-NEXT: add a1, a1, a0
-; CHECK-RV64-FDV-NEXT: slli a0, a0, 1
-; CHECK-RV64-FDV-NEXT: add a0, a0, a1
-; CHECK-RV64-FDV-NEXT: add a0, sp, a0
-; CHECK-RV64-FDV-NEXT: addi a0, a0, 16
-; CHECK-RV64-FDV-NEXT: vs1r.v v5, (a0) # vscale x 8-byte Folded Spill
-; CHECK-RV64-FDV-NEXT: csrr a0, vlenb
-; CHECK-RV64-FDV-NEXT: mv a1, a0
; CHECK-RV64-FDV-NEXT: slli a0, a0, 3
-; CHECK-RV64-FDV-NEXT: add a1, a1, a0
-; CHECK-RV64-FDV-NEXT: slli a0, a0, 1
-; CHECK-RV64-FDV-NEXT: add a0, a0, a1
-; CHECK-RV64-FDV-NEXT: add a0, sp, a0
-; CHECK-RV64-FDV-NEXT: addi a0, a0, 16
-; CHECK-RV64-FDV-NEXT: vs1r.v v6, (a0) # vscale x 8-byte Folded Spill
-; CHECK-RV64-FDV-NEXT: csrr a0, vlenb
-; CHECK-RV64-FDV-NEXT: slli a0, a0, 3
-; CHECK-RV64-FDV-NEXT: mv a1, a0
-; CHECK-RV64-FDV-NEXT: slli a0, a0, 1
-; CHECK-RV64-FDV-NEXT: add a0, a0, a1
-; CHECK-RV64-FDV-NEXT: add a0, sp, a0
-; CHECK-RV64-FDV-NEXT: addi a0, a0, 16
-; CHECK-RV64-FDV-NEXT: vs1r.v v7, (a0) # vscale x 8-byte Folded Spill
-; CHECK-RV64-FDV-NEXT: csrr a0, vlenb
-; CHECK-RV64-FDV-NEXT: mv a1, a0
-; CHECK-RV64-FDV-NEXT: slli a0, a0, 1
-; CHECK-RV64-FDV-NEXT: add a1, a1, a0
-; CHECK-RV64-FDV-NEXT: slli a0, a0, 1
-; CHECK-RV64-FDV-NEXT: add a1, a1, a0
-; CHECK-RV64-FDV-NEXT: slli a0, a0, 2
-; CHECK-RV64-FDV-NEXT: add a0, a0, a1
-; CHECK-RV64-FDV-NEXT: add a0, sp, a0
-; CHECK-RV64-FDV-NEXT: addi a0, a0, 16
-; CHECK-RV64-FDV-NEXT: vs1r.v v8, (a0) # vscale x 8-byte Folded Spill
-; CHECK-RV64-FDV-NEXT: csrr a0, vlenb
-; CHECK-RV64-FDV-NEXT: slli a0, a0, 1
-; CHECK-RV64-FDV-NEXT: mv a1, a0
-; CHECK-RV64-FDV-NEXT: slli a0, a0, 1
-; CHECK-RV64-FDV-NEXT: add a1, a1, a0
-; CHECK-RV64-FDV-NEXT: slli a0, a0, 2
-; CHECK-RV64-FDV-NEXT: add a0, a0, a1
-; CHECK-RV64-FDV-NEXT: add a0, sp, a0
-; CHECK-RV64-FDV-NEXT: addi a0, a0, 16
-; CHECK-RV64-FDV-NEXT: vs1r.v v9, (a0) # vscale x 8-byte Folded Spill
-; CHECK-RV64-FDV-NEXT: csrr a0, vlenb
-; CHECK-RV64-FDV-NEXT: mv a1, a0
-; CHECK-RV64-FDV-NEXT: slli a0, a0, 2
-; CHECK-RV64-FDV-NEXT: add a1, a1, a0
-; CHECK-RV64-FDV-NEXT: slli a0, a0, 2
-; CHECK-RV64-FDV-NEXT: add a0, a0, a1
-; CHECK-RV64-FDV-NEXT: add a0, sp, a0
-; CHECK-RV64-FDV-NEXT: addi a0, a0, 16
-; CHECK-RV64-FDV-NEXT: vs1r.v v10, (a0) # vscale x 8-byte Folded Spill
-; CHECK-RV64-FDV-NEXT: csrr a0, vlenb
-; CHECK-RV64-FDV-NEXT: slli a0, a0, 2
-; CHECK-RV64-FDV-NEXT: mv a1, a0
-; CHECK-RV64-FDV-NEXT: slli a0, a0, 2
-; CHECK-RV64-FDV-NEXT: add a0, a0, a1
-; CHECK-RV64-FDV-NEXT: add a0, sp, a0
-; CHECK-RV64-FDV-NEXT: addi a0, a0, 16
-; CHECK-RV64-FDV-NEXT: vs1r.v v11, (a0) # vscale x 8-byte Folded Spill
-; CHECK-RV64-FDV-NEXT: csrr a0, vlenb
; CHECK-RV64-FDV-NEXT: mv a1, a0
; CHECK-RV64-FDV-NEXT: slli a0, a0, 1
-; CHECK-RV64-FDV-NEXT: add a1, a1, a0
-; CHECK-RV64-FDV-NEXT: slli a0, a0, 3
; CHECK-RV64-FDV-NEXT: add a0, a0, a1
; CHECK-RV64-FDV-NEXT: add a0, sp, a0
; CHECK-RV64-FDV-NEXT: addi a0, a0, 16
-; CHECK-RV64-FDV-NEXT: vs1r.v v12, (a0) # vscale x 8-byte Folded Spill
-; CHECK-RV64-FDV-NEXT: csrr a0, vlenb
-; CHECK-RV64-FDV-NEXT: slli a0, a0, 1
-; CHECK-RV64-FDV-NEXT: mv a1, a0
-; CHECK-RV64-FDV-NEXT: slli a0, a0, 3
-; CHECK-RV64-FDV-NEXT: add a0, a0, a1
-; CHECK-RV64-FDV-NEXT: add a0, sp, a0
-; CHECK-RV64-FDV-NEXT: addi a0, a0, 16
-; CHECK-RV64-FDV-NEXT: vs1r.v v13, (a0) # vscale x 8-byte Folded Spill
-; CHECK-RV64-FDV-NEXT: csrr a0, vlenb
-; CHECK-RV64-FDV-NEXT: slli a1, a0, 4
-; CHECK-RV64-FDV-NEXT: add a0, a1, a0
-; CHECK-RV64-FDV-NEXT: add a0, sp, a0
-; CHECK-RV64-FDV-NEXT: addi a0, a0, 16
-; CHECK-RV64-FDV-NEXT: vs1r.v v14, (a0) # vscale x 8-byte Folded Spill
+; CHECK-RV64-FDV-NEXT: vs8r.v v0, (a0) # vscale x 64-byte Folded Spill
; CHECK-RV64-FDV-NEXT: csrr a0, vlenb
; CHECK-RV64-FDV-NEXT: slli a0, a0, 4
; CHECK-RV64-FDV-NEXT: add a0, sp, a0
; CHECK-RV64-FDV-NEXT: addi a0, a0, 16
-; CHECK-RV64-FDV-NEXT: vs1r.v v15, (a0) # vscale x 8-byte Folded Spill
-; CHECK-RV64-FDV-NEXT: csrr a0, vlenb
-; CHECK-RV64-FDV-NEXT: slli a1, a0, 4
-; CHECK-RV64-FDV-NEXT: sub a0, a1, a0
-; CHECK-RV64-FDV-NEXT: add a0, sp, a0
-; CHECK-RV64-FDV-NEXT: addi a0, a0, 16
-; CHECK-RV64-FDV-NEXT: vs1r.v v16, (a0) # vscale x 8-byte Folded Spill
-; CHECK-RV64-FDV-NEXT: csrr a0, vlenb
-; CHECK-RV64-FDV-NEXT: slli a0, a0, 1
-; CHECK-RV64-FDV-NEXT: mv a1, a0
-; CHECK-RV64-FDV-NEXT: slli a0, a0, 1
-; CHECK-RV64-FDV-NEXT: add a1, a1, a0
-; CHECK-RV64-FDV-NEXT: slli a0, a0, 1
-; CHECK-RV64-FDV-NEXT: add a0, a0, a1
-; CHECK-RV64-FDV-NEXT: add a0, sp, a0
-; CHECK-RV64-FDV-NEXT: addi a0, a0, 16
-; CHECK-RV64-FDV-NEXT: vs1r.v v17, (a0) # vscale x 8-byte Folded Spill
-; CHECK-RV64-FDV-NEXT: csrr a0, vlenb
-; CHECK-RV64-FDV-NEXT: mv a1, a0
-; CHECK-RV64-FDV-NEXT: slli a0, a0, 2
-; CHECK-RV64-FDV-NEXT: add a1, a1, a0
-; CHECK-RV64-FDV-NEXT: slli a0, a0, 1
-; CHECK-RV64-FDV-NEXT: add a0, a0, a1
-; CHECK-RV64-FDV-NEXT: add a0, sp, a0
-; CHECK-RV64-FDV-NEXT: addi a0, a0, 16
-; CHECK-RV64-FDV-NEXT: vs1r.v v18, (a0) # vscale x 8-byte Folded Spill
-; CHECK-RV64-FDV-NEXT: csrr a0, vlenb
-; CHECK-RV64-FDV-NEXT: slli a0, a0, 2
-; CHECK-RV64-FDV-NEXT: mv a1, a0
-; CHECK-RV64-FDV-NEXT: slli a0, a0, 1
-; CHECK-RV64-FDV-NEXT: add a0, a0, a1
-; CHECK-RV64-FDV-NEXT: add a0, sp, a0
-; CHECK-RV64-FDV-NEXT: addi a0, a0, 16
-; CHECK-RV64-FDV-NEXT: vs1r.v v19, (a0) # vscale x 8-byte Folded Spill
-; CHECK-RV64-FDV-NEXT: csrr a0, vlenb
-; CHECK-RV64-FDV-NEXT: mv a1, a0
-; CHECK-RV64-FDV-NEXT: slli a0, a0, 1
-; CHECK-RV64-FDV-NEXT: add a1, a1, a0
-; CHECK-RV64-FDV-NEXT: slli a0, a0, 2
-; CHECK-RV64-FDV-NEXT: add a0, a0, a1
-; CHECK-RV64-FDV-NEXT: add a0, sp, a0
-; CHECK-RV64-FDV-NEXT: addi a0, a0, 16
-; CHECK-RV64-FDV-NEXT: vs1r.v v20, (a0) # vscale x 8-byte Folded Spill
-; CHECK-RV64-FDV-NEXT: csrr a0, vlenb
-; CHECK-RV64-FDV-NEXT: slli a0, a0, 1
-; CHECK-RV64-FDV-NEXT: mv a1, a0
-; CHECK-RV64-FDV-NEXT: slli a0, a0, 2
-; CHECK-RV64-FDV-NEXT: add a0, a0, a1
-; CHECK-RV64-FDV-NEXT: add a0, sp, a0
-; CHECK-RV64-FDV-NEXT: addi a0, a0, 16
-; CHECK-RV64-FDV-NEXT: vs1r.v v21, (a0) # vscale x 8-byte Folded Spill
-; CHECK-RV64-FDV-NEXT: csrr a0, vlenb
-; CHECK-RV64-FDV-NEXT: slli a1, a0, 3
-; CHECK-RV64-FDV-NEXT: add a0, a1, a0
-; CHECK-RV64-FDV-NEXT: add a0, sp, a0
-; CHECK-RV64-FDV-NEXT: addi a0, a0, 16
-; CHECK-RV64-FDV-NEXT: vs1r.v v22, (a0) # vscale x 8-byte Folded Spill
+; CHECK-RV64-FDV-NEXT: vs8r.v v8, (a0) # vscale x 64-byte Folded Spill
; CHECK-RV64-FDV-NEXT: csrr a0, vlenb
; CHECK-RV64-FDV-NEXT: slli a0, a0, 3
; CHECK-RV64-FDV-NEXT: add a0, sp, a0
; CHECK-RV64-FDV-NEXT: addi a0, a0, 16
-; CHECK-RV64-FDV-NEXT: vs1r.v v23, (a0) # vscale x 8-byte Folded Spill
-; CHECK-RV64-FDV-NEXT: csrr a0, vlenb
-; CHECK-RV64-FDV-NEXT: slli a1, a0, 3
-; CHECK-RV64-FDV-NEXT: sub a0, a1, a0
-; CHECK-RV64-FDV-NEXT: add a0, sp, a0
-; CHECK-RV64-FDV-NEXT: addi a0, a0, 16
-; CHECK-RV64-FDV-NEXT: vs1r.v v24, (a0) # vscale x 8-byte Folded Spill
-; CHECK-RV64-FDV-NEXT: csrr a0, vlenb
-; CHECK-RV64-FDV-NEXT: slli a0, a0, 1
-; CHECK-RV64-FDV-NEXT: mv a1, a0
-; CHECK-RV64-FDV-NEXT: slli a0, a0, 1
-; CHECK-RV64-FDV-NEXT: add a0, a0, a1
-; CHECK-RV64-FDV-NEXT: add a0, sp, a0
-; CHECK-RV64-FDV-NEXT: addi a0, a0, 16
-; CHECK-RV64-FDV-NEXT: vs1r.v v25, (a0) # vscale x 8-byte Folded Spill
-; CHECK-RV64-FDV-NEXT: csrr a0, vlenb
-; CHECK-RV64-FDV-NEXT: slli a1, a0, 2
-; CHECK-RV64-FDV-NEXT: add a0, a1, a0
-; CHECK-RV64-FDV-NEXT: add a0, sp, a0
-; CHECK-RV64-FDV-NEXT: addi a0, a0, 16
-; CHECK-RV64-FDV-NEXT: vs1r.v v26, (a0) # vscale x 8-byte Folded Spill
-; CHECK-RV64-FDV-NEXT: csrr a0, vlenb
-; CHECK-RV64-FDV-NEXT: slli a0, a0, 2
-; CHECK-RV64-FDV-NEXT: add a0, sp, a0
-; CHECK-RV64-FDV-NEXT: addi a0, a0, 16
-; CHECK-RV64-FDV-NEXT: vs1r.v v27, (a0) # vscale x 8-byte Folded Spill
-; CHECK-RV64-FDV-NEXT: csrr a0, vlenb
-; CHECK-RV64-FDV-NEXT: slli a1, a0, 1
-; CHECK-RV64-FDV-NEXT: add a0, a1, a0
-; CHECK-RV64-FDV-NEXT: add a0, sp, a0
-; CHECK-RV64-FDV-NEXT: addi a0, a0, 16
-; CHECK-RV64-FDV-NEXT: vs1r.v v28, (a0) # vscale x 8-byte Folded Spill
-; CHECK-RV64-FDV-NEXT: csrr a0, vlenb
-; CHECK-RV64-FDV-NEXT: slli a0, a0, 1
-; CHECK-RV64-FDV-NEXT: add a0, sp, a0
-; CHECK-RV64-FDV-NEXT: addi a0, a0, 16
-; CHECK-RV64-FDV-NEXT: vs1r.v v29, (a0) # vscale x 8-byte Folded Spill
-; CHECK-RV64-FDV-NEXT: csrr a0, vlenb
-; CHECK-RV64-FDV-NEXT: add a0, sp, a0
-; CHECK-RV64-FDV-NEXT: addi a0, a0, 16
-; CHECK-RV64-FDV-NEXT: vs1r.v v30, (a0) # vscale x 8-byte Folded Spill
+; CHECK-RV64-FDV-NEXT: vs8r.v v16, (a0) # vscale x 64-byte Folded Spill
; CHECK-RV64-FDV-NEXT: addi a0, sp, 16
-; CHECK-RV64-FDV-NEXT: vs1r.v v31, (a0) # vscale x 8-byte Folded Spill
+; CHECK-RV64-FDV-NEXT: vs8r.v v24, (a0) # vscale x 64-byte Folded Spill
; CHECK-RV64-FDV-NEXT: call otherfoo
; CHECK-RV64-FDV-NEXT: csrr a0, vlenb
-; CHECK-RV64-FDV-NEXT: slli a1, a0, 5
-; CHECK-RV64-FDV-NEXT: sub a0, a1, a0
-; CHECK-RV64-FDV-NEXT: add a0, sp, a0
-; CHECK-RV64-FDV-NEXT: addi a0, a0, 16
-; CHECK-RV64-FDV-NEXT: vl1r.v v0, (a0) # vscale x 8-byte Folded Reload
-; CHECK-RV64-FDV-NEXT: csrr a0, vlenb
-; CHECK-RV64-FDV-NEXT: slli a0, a0, 1
-; CHECK-RV64-FDV-NEXT: mv a1, a0
-; CHECK-RV64-FDV-NEXT: slli a0, a0, 1
-; CHECK-RV64-FDV-NEXT: add a1, a1, a0
-; CHECK-RV64-FDV-NEXT: slli a0, a0, 1
-; CHECK-RV64-FDV-NEXT: add a1, a1, a0
-; CHECK-RV64-FDV-NEXT: slli a0, a0, 1
-; CHECK-RV64-FDV-NEXT: add a0, a0, a1
-; CHECK-RV64-FDV-NEXT: add a0, sp, a0
-; CHECK-RV64-FDV-NEXT: addi a0, a0, 16
-; CHECK-RV64-FDV-NEXT: vl1r.v v1, (a0) # vscale x 8-byte Folded Reload
-; CHECK-RV64-FDV-NEXT: csrr a0, vlenb
-; CHECK-RV64-FDV-NEXT: mv a1, a0
-; CHECK-RV64-FDV-NEXT: slli a0, a0, 2
-; CHECK-RV64-FDV-NEXT: add a1, a1, a0
-; CHECK-RV64-FDV-NEXT: slli a0, a0, 1
-; CHECK-RV64-FDV-NEXT: add a1, a1, a0
-; CHECK-RV64-FDV-NEXT: slli a0, a0, 1
-; CHECK-RV64-FDV-NEXT: add a0, a0, a1
-; CHECK-RV64-FDV-NEXT: add a0, sp, a0
-; CHECK-RV64-FDV-NEXT: addi a0, a0, 16
-; CHECK-RV64-FDV-NEXT: vl1r.v v2, (a0) # vscale x 8-byte Folded Reload
-; CHECK-RV64-FDV-NEXT: csrr a0, vlenb
-; CHECK-RV64-FDV-NEXT: slli a0, a0, 2
-; CHECK-RV64-FDV-NEXT: mv a1, a0
-; CHECK-RV64-FDV-NEXT: slli a0, a0, 1
-; CHECK-RV64-FDV-NEXT: add a1, a1, a0
-; CHECK-RV64-FDV-NEXT: slli a0, a0, 1
-; CHECK-RV64-FDV-NEXT: add a0, a0, a1
-; CHECK-RV64-FDV-NEXT: add a0, sp, a0
-; CHECK-RV64-FDV-NEXT: addi a0, a0, 16
-; CHECK-RV64-FDV-NEXT: vl1r.v v3, (a0) # vscale x 8-byte Folded Reload
-; CHECK-RV64-FDV-NEXT: csrr a0, vlenb
-; CHECK-RV64-FDV-NEXT: mv a1, a0
-; CHECK-RV64-FDV-NEXT: slli a0, a0, 1
-; CHECK-RV64-FDV-NEXT: add a1, a1, a0
-; CHECK-RV64-FDV-NEXT: slli a0, a0, 2
-; CHECK-RV64-FDV-NEXT: add a1, a1, a0
-; CHECK-RV64-FDV-NEXT: slli a0, a0, 1
-; CHECK-RV64-FDV-NEXT: add a0, a0, a1
-; CHECK-RV64-FDV-NEXT: add a0, sp, a0
-; CHECK-RV64-FDV-NEXT: addi a0, a0, 16
-; CHECK-RV64-FDV-NEXT: vl1r.v v4, (a0) # vscale x 8-byte Folded Reload
-; CHECK-RV64-FDV-NEXT: csrr a0, vlenb
-; CHECK-RV64-FDV-NEXT: slli a0, a0, 1
-; CHECK-RV64-FDV-NEXT: mv a1, a0
-; CHECK-RV64-FDV-NEXT: slli a0, a0, 2
-; CHECK-RV64-FDV-NEXT: add a1, a1, a0
-; CHECK-RV64-FDV-NEXT: slli a0, a0, 1
-; CHECK-RV64-FDV-NEXT: add a0, a0, a1
-; CHECK-RV64-FDV-NEXT: add a0, sp, a0
-; CHECK-RV64-FDV-NEXT: addi a0, a0, 16
-; CHECK-RV64-FDV-NEXT: vl1r.v v5, (a0) # vscale x 8-byte Folded Reload
-; CHECK-RV64-FDV-NEXT: csrr a0, vlenb
-; CHECK-RV64-FDV-NEXT: mv a1, a0
-; CHECK-RV64-FDV-NEXT: slli a0, a0, 3
-; CHECK-RV64-FDV-NEXT: add a1, a1, a0
-; CHECK-RV64-FDV-NEXT: slli a0, a0, 1
-; CHECK-RV64-FDV-NEXT: add a0, a0, a1
-; CHECK-RV64-FDV-NEXT: add a0, sp, a0
-; CHECK-RV64-FDV-NEXT: addi a0, a0, 16
-; CHECK-RV64-FDV-NEXT: vl1r.v v6, (a0) # vscale x 8-byte Folded Reload
-; CHECK-RV64-FDV-NEXT: csrr a0, vlenb
; CHECK-RV64-FDV-NEXT: slli a0, a0, 3
; CHECK-RV64-FDV-NEXT: mv a1, a0
; CHECK-RV64-FDV-NEXT: slli a0, a0, 1
; CHECK-RV64-FDV-NEXT: add a0, a0, a1
; CHECK-RV64-FDV-NEXT: add a0, sp, a0
; CHECK-RV64-FDV-NEXT: addi a0, a0, 16
-; CHECK-RV64-FDV-NEXT: vl1r.v v7, (a0) # vscale x 8-byte Folded Reload
-; CHECK-RV64-FDV-NEXT: csrr a0, vlenb
-; CHECK-RV64-FDV-NEXT: mv a1, a0
-; CHECK-RV64-FDV-NEXT: slli
a0, a0, 1 -; CHECK-RV64-FDV-NEXT: add a1, a1, a0 -; CHECK-RV64-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV64-FDV-NEXT: add a1, a1, a0 -; CHECK-RV64-FDV-NEXT: slli a0, a0, 2 -; CHECK-RV64-FDV-NEXT: add a0, a0, a1 -; CHECK-RV64-FDV-NEXT: add a0, sp, a0 -; CHECK-RV64-FDV-NEXT: addi a0, a0, 16 -; CHECK-RV64-FDV-NEXT: vl1r.v v8, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV64-FDV-NEXT: csrr a0, vlenb -; CHECK-RV64-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV64-FDV-NEXT: mv a1, a0 -; CHECK-RV64-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV64-FDV-NEXT: add a1, a1, a0 -; CHECK-RV64-FDV-NEXT: slli a0, a0, 2 -; CHECK-RV64-FDV-NEXT: add a0, a0, a1 -; CHECK-RV64-FDV-NEXT: add a0, sp, a0 -; CHECK-RV64-FDV-NEXT: addi a0, a0, 16 -; CHECK-RV64-FDV-NEXT: vl1r.v v9, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV64-FDV-NEXT: csrr a0, vlenb -; CHECK-RV64-FDV-NEXT: mv a1, a0 -; CHECK-RV64-FDV-NEXT: slli a0, a0, 2 -; CHECK-RV64-FDV-NEXT: add a1, a1, a0 -; CHECK-RV64-FDV-NEXT: slli a0, a0, 2 -; CHECK-RV64-FDV-NEXT: add a0, a0, a1 -; CHECK-RV64-FDV-NEXT: add a0, sp, a0 -; CHECK-RV64-FDV-NEXT: addi a0, a0, 16 -; CHECK-RV64-FDV-NEXT: vl1r.v v10, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV64-FDV-NEXT: csrr a0, vlenb -; CHECK-RV64-FDV-NEXT: slli a0, a0, 2 -; CHECK-RV64-FDV-NEXT: mv a1, a0 -; CHECK-RV64-FDV-NEXT: slli a0, a0, 2 -; CHECK-RV64-FDV-NEXT: add a0, a0, a1 -; CHECK-RV64-FDV-NEXT: add a0, sp, a0 -; CHECK-RV64-FDV-NEXT: addi a0, a0, 16 -; CHECK-RV64-FDV-NEXT: vl1r.v v11, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV64-FDV-NEXT: csrr a0, vlenb -; CHECK-RV64-FDV-NEXT: mv a1, a0 -; CHECK-RV64-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV64-FDV-NEXT: add a1, a1, a0 -; CHECK-RV64-FDV-NEXT: slli a0, a0, 3 -; CHECK-RV64-FDV-NEXT: add a0, a0, a1 -; CHECK-RV64-FDV-NEXT: add a0, sp, a0 -; CHECK-RV64-FDV-NEXT: addi a0, a0, 16 -; CHECK-RV64-FDV-NEXT: vl1r.v v12, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV64-FDV-NEXT: csrr a0, vlenb -; CHECK-RV64-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV64-FDV-NEXT: mv a1, a0 -; CHECK-RV64-FDV-NEXT: slli a0, a0, 3 -; CHECK-RV64-FDV-NEXT: add a0, a0, a1 -; CHECK-RV64-FDV-NEXT: add a0, sp, a0 -; CHECK-RV64-FDV-NEXT: addi a0, a0, 16 -; CHECK-RV64-FDV-NEXT: vl1r.v v13, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV64-FDV-NEXT: csrr a0, vlenb -; CHECK-RV64-FDV-NEXT: slli a1, a0, 4 -; CHECK-RV64-FDV-NEXT: add a0, a1, a0 -; CHECK-RV64-FDV-NEXT: add a0, sp, a0 -; CHECK-RV64-FDV-NEXT: addi a0, a0, 16 -; CHECK-RV64-FDV-NEXT: vl1r.v v14, (a0) # vscale x 8-byte Folded Reload +; CHECK-RV64-FDV-NEXT: vl8r.v v0, (a0) # vscale x 64-byte Folded Reload ; CHECK-RV64-FDV-NEXT: csrr a0, vlenb ; CHECK-RV64-FDV-NEXT: slli a0, a0, 4 ; CHECK-RV64-FDV-NEXT: add a0, sp, a0 ; CHECK-RV64-FDV-NEXT: addi a0, a0, 16 -; CHECK-RV64-FDV-NEXT: vl1r.v v15, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV64-FDV-NEXT: csrr a0, vlenb -; CHECK-RV64-FDV-NEXT: slli a1, a0, 4 -; CHECK-RV64-FDV-NEXT: sub a0, a1, a0 -; CHECK-RV64-FDV-NEXT: add a0, sp, a0 -; CHECK-RV64-FDV-NEXT: addi a0, a0, 16 -; CHECK-RV64-FDV-NEXT: vl1r.v v16, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV64-FDV-NEXT: csrr a0, vlenb -; CHECK-RV64-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV64-FDV-NEXT: mv a1, a0 -; CHECK-RV64-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV64-FDV-NEXT: add a1, a1, a0 -; CHECK-RV64-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV64-FDV-NEXT: add a0, a0, a1 -; CHECK-RV64-FDV-NEXT: add a0, sp, a0 -; CHECK-RV64-FDV-NEXT: addi a0, a0, 16 -; CHECK-RV64-FDV-NEXT: vl1r.v v17, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV64-FDV-NEXT: csrr a0, vlenb -; CHECK-RV64-FDV-NEXT: mv a1, a0 -; 
CHECK-RV64-FDV-NEXT: slli a0, a0, 2 -; CHECK-RV64-FDV-NEXT: add a1, a1, a0 -; CHECK-RV64-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV64-FDV-NEXT: add a0, a0, a1 -; CHECK-RV64-FDV-NEXT: add a0, sp, a0 -; CHECK-RV64-FDV-NEXT: addi a0, a0, 16 -; CHECK-RV64-FDV-NEXT: vl1r.v v18, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV64-FDV-NEXT: csrr a0, vlenb -; CHECK-RV64-FDV-NEXT: slli a0, a0, 2 -; CHECK-RV64-FDV-NEXT: mv a1, a0 -; CHECK-RV64-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV64-FDV-NEXT: add a0, a0, a1 -; CHECK-RV64-FDV-NEXT: add a0, sp, a0 -; CHECK-RV64-FDV-NEXT: addi a0, a0, 16 -; CHECK-RV64-FDV-NEXT: vl1r.v v19, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV64-FDV-NEXT: csrr a0, vlenb -; CHECK-RV64-FDV-NEXT: mv a1, a0 -; CHECK-RV64-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV64-FDV-NEXT: add a1, a1, a0 -; CHECK-RV64-FDV-NEXT: slli a0, a0, 2 -; CHECK-RV64-FDV-NEXT: add a0, a0, a1 -; CHECK-RV64-FDV-NEXT: add a0, sp, a0 -; CHECK-RV64-FDV-NEXT: addi a0, a0, 16 -; CHECK-RV64-FDV-NEXT: vl1r.v v20, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV64-FDV-NEXT: csrr a0, vlenb -; CHECK-RV64-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV64-FDV-NEXT: mv a1, a0 -; CHECK-RV64-FDV-NEXT: slli a0, a0, 2 -; CHECK-RV64-FDV-NEXT: add a0, a0, a1 -; CHECK-RV64-FDV-NEXT: add a0, sp, a0 -; CHECK-RV64-FDV-NEXT: addi a0, a0, 16 -; CHECK-RV64-FDV-NEXT: vl1r.v v21, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV64-FDV-NEXT: csrr a0, vlenb -; CHECK-RV64-FDV-NEXT: slli a1, a0, 3 -; CHECK-RV64-FDV-NEXT: add a0, a1, a0 -; CHECK-RV64-FDV-NEXT: add a0, sp, a0 -; CHECK-RV64-FDV-NEXT: addi a0, a0, 16 -; CHECK-RV64-FDV-NEXT: vl1r.v v22, (a0) # vscale x 8-byte Folded Reload +; CHECK-RV64-FDV-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload ; CHECK-RV64-FDV-NEXT: csrr a0, vlenb ; CHECK-RV64-FDV-NEXT: slli a0, a0, 3 ; CHECK-RV64-FDV-NEXT: add a0, sp, a0 ; CHECK-RV64-FDV-NEXT: addi a0, a0, 16 -; CHECK-RV64-FDV-NEXT: vl1r.v v23, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV64-FDV-NEXT: csrr a0, vlenb -; CHECK-RV64-FDV-NEXT: slli a1, a0, 3 -; CHECK-RV64-FDV-NEXT: sub a0, a1, a0 -; CHECK-RV64-FDV-NEXT: add a0, sp, a0 -; CHECK-RV64-FDV-NEXT: addi a0, a0, 16 -; CHECK-RV64-FDV-NEXT: vl1r.v v24, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV64-FDV-NEXT: csrr a0, vlenb -; CHECK-RV64-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV64-FDV-NEXT: mv a1, a0 -; CHECK-RV64-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV64-FDV-NEXT: add a0, a0, a1 -; CHECK-RV64-FDV-NEXT: add a0, sp, a0 -; CHECK-RV64-FDV-NEXT: addi a0, a0, 16 -; CHECK-RV64-FDV-NEXT: vl1r.v v25, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV64-FDV-NEXT: csrr a0, vlenb -; CHECK-RV64-FDV-NEXT: slli a1, a0, 2 -; CHECK-RV64-FDV-NEXT: add a0, a1, a0 -; CHECK-RV64-FDV-NEXT: add a0, sp, a0 -; CHECK-RV64-FDV-NEXT: addi a0, a0, 16 -; CHECK-RV64-FDV-NEXT: vl1r.v v26, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV64-FDV-NEXT: csrr a0, vlenb -; CHECK-RV64-FDV-NEXT: slli a0, a0, 2 -; CHECK-RV64-FDV-NEXT: add a0, sp, a0 -; CHECK-RV64-FDV-NEXT: addi a0, a0, 16 -; CHECK-RV64-FDV-NEXT: vl1r.v v27, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV64-FDV-NEXT: csrr a0, vlenb -; CHECK-RV64-FDV-NEXT: slli a1, a0, 1 -; CHECK-RV64-FDV-NEXT: add a0, a1, a0 -; CHECK-RV64-FDV-NEXT: add a0, sp, a0 -; CHECK-RV64-FDV-NEXT: addi a0, a0, 16 -; CHECK-RV64-FDV-NEXT: vl1r.v v28, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV64-FDV-NEXT: csrr a0, vlenb -; CHECK-RV64-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV64-FDV-NEXT: add a0, sp, a0 -; CHECK-RV64-FDV-NEXT: addi a0, a0, 16 -; CHECK-RV64-FDV-NEXT: vl1r.v v29, (a0) # vscale x 8-byte Folded Reload -; 
CHECK-RV64-FDV-NEXT: csrr a0, vlenb -; CHECK-RV64-FDV-NEXT: add a0, sp, a0 -; CHECK-RV64-FDV-NEXT: addi a0, a0, 16 -; CHECK-RV64-FDV-NEXT: vl1r.v v30, (a0) # vscale x 8-byte Folded Reload +; CHECK-RV64-FDV-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload ; CHECK-RV64-FDV-NEXT: addi a0, sp, 16 -; CHECK-RV64-FDV-NEXT: vl1r.v v31, (a0) # vscale x 8-byte Folded Reload +; CHECK-RV64-FDV-NEXT: vl8r.v v24, (a0) # vscale x 64-byte Folded Reload ; CHECK-RV64-FDV-NEXT: csrr a0, vlenb ; CHECK-RV64-FDV-NEXT: slli a0, a0, 5 ; CHECK-RV64-FDV-NEXT: add sp, sp, a0 @@ -5670,422 +2958,39 @@ define void @foo_fp_with_call() #2 { ; CHECK-RV32-V-NEXT: slli a0, a0, 5 ; CHECK-RV32-V-NEXT: sub sp, sp, a0 ; CHECK-RV32-V-NEXT: csrr a0, vlenb -; CHECK-RV32-V-NEXT: sub a0, s0, a0 -; CHECK-RV32-V-NEXT: addi a0, a0, -80 -; CHECK-RV32-V-NEXT: vs1r.v v0, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-V-NEXT: csrr a0, vlenb -; CHECK-RV32-V-NEXT: slli a0, a0, 1 -; CHECK-RV32-V-NEXT: sub a0, s0, a0 -; CHECK-RV32-V-NEXT: addi a0, a0, -80 -; CHECK-RV32-V-NEXT: vs1r.v v1, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-V-NEXT: csrr a0, vlenb -; CHECK-RV32-V-NEXT: slli a1, a0, 1 -; CHECK-RV32-V-NEXT: add a0, a1, a0 -; CHECK-RV32-V-NEXT: sub a0, s0, a0 -; CHECK-RV32-V-NEXT: addi a0, a0, -80 -; CHECK-RV32-V-NEXT: vs1r.v v2, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-V-NEXT: csrr a0, vlenb -; CHECK-RV32-V-NEXT: slli a0, a0, 2 -; CHECK-RV32-V-NEXT: sub a0, s0, a0 -; CHECK-RV32-V-NEXT: addi a0, a0, -80 -; CHECK-RV32-V-NEXT: vs1r.v v3, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-V-NEXT: csrr a0, vlenb -; CHECK-RV32-V-NEXT: slli a1, a0, 2 -; CHECK-RV32-V-NEXT: add a0, a1, a0 -; CHECK-RV32-V-NEXT: sub a0, s0, a0 -; CHECK-RV32-V-NEXT: addi a0, a0, -80 -; CHECK-RV32-V-NEXT: vs1r.v v4, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-V-NEXT: csrr a0, vlenb -; CHECK-RV32-V-NEXT: slli a0, a0, 1 -; CHECK-RV32-V-NEXT: mv a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 1 -; CHECK-RV32-V-NEXT: add a0, a0, a1 -; CHECK-RV32-V-NEXT: sub a0, s0, a0 -; CHECK-RV32-V-NEXT: addi a0, a0, -80 -; CHECK-RV32-V-NEXT: vs1r.v v5, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-V-NEXT: csrr a0, vlenb -; CHECK-RV32-V-NEXT: slli a1, a0, 3 -; CHECK-RV32-V-NEXT: sub a0, a1, a0 -; CHECK-RV32-V-NEXT: sub a0, s0, a0 -; CHECK-RV32-V-NEXT: addi a0, a0, -80 -; CHECK-RV32-V-NEXT: vs1r.v v6, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-V-NEXT: csrr a0, vlenb ; CHECK-RV32-V-NEXT: slli a0, a0, 3 ; CHECK-RV32-V-NEXT: sub a0, s0, a0 ; CHECK-RV32-V-NEXT: addi a0, a0, -80 -; CHECK-RV32-V-NEXT: vs1r.v v7, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-V-NEXT: csrr a0, vlenb -; CHECK-RV32-V-NEXT: slli a1, a0, 3 -; CHECK-RV32-V-NEXT: add a0, a1, a0 -; CHECK-RV32-V-NEXT: sub a0, s0, a0 -; CHECK-RV32-V-NEXT: addi a0, a0, -80 -; CHECK-RV32-V-NEXT: vs1r.v v8, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-V-NEXT: csrr a0, vlenb -; CHECK-RV32-V-NEXT: slli a0, a0, 1 -; CHECK-RV32-V-NEXT: mv a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 2 -; CHECK-RV32-V-NEXT: add a0, a0, a1 -; CHECK-RV32-V-NEXT: sub a0, s0, a0 -; CHECK-RV32-V-NEXT: addi a0, a0, -80 -; CHECK-RV32-V-NEXT: vs1r.v v9, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-V-NEXT: csrr a0, vlenb -; CHECK-RV32-V-NEXT: mv a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 1 -; CHECK-RV32-V-NEXT: add a1, a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 2 -; CHECK-RV32-V-NEXT: add a0, a0, a1 -; CHECK-RV32-V-NEXT: sub a0, s0, a0 -; CHECK-RV32-V-NEXT: addi a0, a0, -80 -; CHECK-RV32-V-NEXT: vs1r.v v10, (a0) # vscale x 8-byte 
Folded Spill -; CHECK-RV32-V-NEXT: csrr a0, vlenb -; CHECK-RV32-V-NEXT: slli a0, a0, 2 -; CHECK-RV32-V-NEXT: mv a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 1 -; CHECK-RV32-V-NEXT: add a0, a0, a1 -; CHECK-RV32-V-NEXT: sub a0, s0, a0 -; CHECK-RV32-V-NEXT: addi a0, a0, -80 -; CHECK-RV32-V-NEXT: vs1r.v v11, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-V-NEXT: csrr a0, vlenb -; CHECK-RV32-V-NEXT: mv a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 2 -; CHECK-RV32-V-NEXT: add a1, a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 1 -; CHECK-RV32-V-NEXT: add a0, a0, a1 -; CHECK-RV32-V-NEXT: sub a0, s0, a0 -; CHECK-RV32-V-NEXT: addi a0, a0, -80 -; CHECK-RV32-V-NEXT: vs1r.v v12, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-V-NEXT: csrr a0, vlenb -; CHECK-RV32-V-NEXT: slli a0, a0, 1 -; CHECK-RV32-V-NEXT: mv a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 1 -; CHECK-RV32-V-NEXT: add a1, a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 1 -; CHECK-RV32-V-NEXT: add a0, a0, a1 -; CHECK-RV32-V-NEXT: sub a0, s0, a0 -; CHECK-RV32-V-NEXT: addi a0, a0, -80 -; CHECK-RV32-V-NEXT: vs1r.v v13, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-V-NEXT: csrr a0, vlenb -; CHECK-RV32-V-NEXT: slli a1, a0, 4 -; CHECK-RV32-V-NEXT: sub a0, a1, a0 -; CHECK-RV32-V-NEXT: sub a0, s0, a0 -; CHECK-RV32-V-NEXT: addi a0, a0, -80 -; CHECK-RV32-V-NEXT: vs1r.v v14, (a0) # vscale x 8-byte Folded Spill +; CHECK-RV32-V-NEXT: vs8r.v v0, (a0) # vscale x 64-byte Folded Spill ; CHECK-RV32-V-NEXT: csrr a0, vlenb ; CHECK-RV32-V-NEXT: slli a0, a0, 4 ; CHECK-RV32-V-NEXT: sub a0, s0, a0 ; CHECK-RV32-V-NEXT: addi a0, a0, -80 -; CHECK-RV32-V-NEXT: vs1r.v v15, (a0) # vscale x 8-byte Folded Spill +; CHECK-RV32-V-NEXT: vs8r.v v8, (a0) # vscale x 64-byte Folded Spill ; CHECK-RV32-V-NEXT: csrr a0, vlenb -; CHECK-RV32-V-NEXT: slli a1, a0, 4 -; CHECK-RV32-V-NEXT: add a0, a1, a0 -; CHECK-RV32-V-NEXT: sub a0, s0, a0 -; CHECK-RV32-V-NEXT: addi a0, a0, -80 -; CHECK-RV32-V-NEXT: vs1r.v v16, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-V-NEXT: csrr a0, vlenb -; CHECK-RV32-V-NEXT: slli a0, a0, 1 -; CHECK-RV32-V-NEXT: mv a1, a0 ; CHECK-RV32-V-NEXT: slli a0, a0, 3 -; CHECK-RV32-V-NEXT: add a0, a0, a1 -; CHECK-RV32-V-NEXT: sub a0, s0, a0 -; CHECK-RV32-V-NEXT: addi a0, a0, -80 -; CHECK-RV32-V-NEXT: vs1r.v v17, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-V-NEXT: csrr a0, vlenb -; CHECK-RV32-V-NEXT: mv a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 1 -; CHECK-RV32-V-NEXT: add a1, a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 3 -; CHECK-RV32-V-NEXT: add a0, a0, a1 -; CHECK-RV32-V-NEXT: sub a0, s0, a0 -; CHECK-RV32-V-NEXT: addi a0, a0, -80 -; CHECK-RV32-V-NEXT: vs1r.v v18, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-V-NEXT: csrr a0, vlenb -; CHECK-RV32-V-NEXT: slli a0, a0, 2 -; CHECK-RV32-V-NEXT: mv a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 2 -; CHECK-RV32-V-NEXT: add a0, a0, a1 -; CHECK-RV32-V-NEXT: sub a0, s0, a0 -; CHECK-RV32-V-NEXT: addi a0, a0, -80 -; CHECK-RV32-V-NEXT: vs1r.v v19, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-V-NEXT: csrr a0, vlenb -; CHECK-RV32-V-NEXT: mv a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 2 -; CHECK-RV32-V-NEXT: add a1, a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 2 -; CHECK-RV32-V-NEXT: add a0, a0, a1 -; CHECK-RV32-V-NEXT: sub a0, s0, a0 -; CHECK-RV32-V-NEXT: addi a0, a0, -80 -; CHECK-RV32-V-NEXT: vs1r.v v20, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-V-NEXT: csrr a0, vlenb -; CHECK-RV32-V-NEXT: slli a0, a0, 1 -; CHECK-RV32-V-NEXT: mv a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 1 -; CHECK-RV32-V-NEXT: add a1, a1, a0 -; CHECK-RV32-V-NEXT: slli 
a0, a0, 2 -; CHECK-RV32-V-NEXT: add a0, a0, a1 -; CHECK-RV32-V-NEXT: sub a0, s0, a0 -; CHECK-RV32-V-NEXT: addi a0, a0, -80 -; CHECK-RV32-V-NEXT: vs1r.v v21, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-V-NEXT: csrr a0, vlenb -; CHECK-RV32-V-NEXT: mv a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 1 -; CHECK-RV32-V-NEXT: add a1, a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 1 -; CHECK-RV32-V-NEXT: add a1, a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 2 -; CHECK-RV32-V-NEXT: add a0, a0, a1 -; CHECK-RV32-V-NEXT: sub a0, s0, a0 -; CHECK-RV32-V-NEXT: addi a0, a0, -80 -; CHECK-RV32-V-NEXT: vs1r.v v22, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-V-NEXT: csrr a0, vlenb -; CHECK-RV32-V-NEXT: slli a0, a0, 3 -; CHECK-RV32-V-NEXT: mv a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 1 -; CHECK-RV32-V-NEXT: add a0, a0, a1 -; CHECK-RV32-V-NEXT: sub a0, s0, a0 -; CHECK-RV32-V-NEXT: addi a0, a0, -80 -; CHECK-RV32-V-NEXT: vs1r.v v23, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-V-NEXT: csrr a0, vlenb -; CHECK-RV32-V-NEXT: mv a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 3 -; CHECK-RV32-V-NEXT: add a1, a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 1 -; CHECK-RV32-V-NEXT: add a0, a0, a1 -; CHECK-RV32-V-NEXT: sub a0, s0, a0 -; CHECK-RV32-V-NEXT: addi a0, a0, -80 -; CHECK-RV32-V-NEXT: vs1r.v v24, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-V-NEXT: csrr a0, vlenb -; CHECK-RV32-V-NEXT: slli a0, a0, 1 ; CHECK-RV32-V-NEXT: mv a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 2 -; CHECK-RV32-V-NEXT: add a1, a1, a0 ; CHECK-RV32-V-NEXT: slli a0, a0, 1 ; CHECK-RV32-V-NEXT: add a0, a0, a1 ; CHECK-RV32-V-NEXT: sub a0, s0, a0 ; CHECK-RV32-V-NEXT: addi a0, a0, -80 -; CHECK-RV32-V-NEXT: vs1r.v v25, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-V-NEXT: csrr a0, vlenb -; CHECK-RV32-V-NEXT: mv a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 1 -; CHECK-RV32-V-NEXT: add a1, a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 2 -; CHECK-RV32-V-NEXT: add a1, a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 1 -; CHECK-RV32-V-NEXT: add a0, a0, a1 -; CHECK-RV32-V-NEXT: sub a0, s0, a0 -; CHECK-RV32-V-NEXT: addi a0, a0, -80 -; CHECK-RV32-V-NEXT: vs1r.v v26, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-V-NEXT: csrr a0, vlenb -; CHECK-RV32-V-NEXT: slli a0, a0, 2 -; CHECK-RV32-V-NEXT: mv a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 1 -; CHECK-RV32-V-NEXT: add a1, a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 1 -; CHECK-RV32-V-NEXT: add a0, a0, a1 -; CHECK-RV32-V-NEXT: sub a0, s0, a0 -; CHECK-RV32-V-NEXT: addi a0, a0, -80 -; CHECK-RV32-V-NEXT: vs1r.v v27, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-V-NEXT: csrr a0, vlenb -; CHECK-RV32-V-NEXT: mv a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 2 -; CHECK-RV32-V-NEXT: add a1, a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 1 -; CHECK-RV32-V-NEXT: add a1, a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 1 -; CHECK-RV32-V-NEXT: add a0, a0, a1 -; CHECK-RV32-V-NEXT: sub a0, s0, a0 -; CHECK-RV32-V-NEXT: addi a0, a0, -80 -; CHECK-RV32-V-NEXT: vs1r.v v28, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-V-NEXT: csrr a0, vlenb -; CHECK-RV32-V-NEXT: slli a0, a0, 1 -; CHECK-RV32-V-NEXT: mv a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 1 -; CHECK-RV32-V-NEXT: add a1, a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 1 -; CHECK-RV32-V-NEXT: add a1, a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 1 -; CHECK-RV32-V-NEXT: add a0, a0, a1 -; CHECK-RV32-V-NEXT: sub a0, s0, a0 -; CHECK-RV32-V-NEXT: addi a0, a0, -80 -; CHECK-RV32-V-NEXT: vs1r.v v29, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-V-NEXT: csrr a0, vlenb -; CHECK-RV32-V-NEXT: slli a1, a0, 5 -; 
CHECK-RV32-V-NEXT: sub a0, a1, a0 -; CHECK-RV32-V-NEXT: sub a0, s0, a0 -; CHECK-RV32-V-NEXT: addi a0, a0, -80 -; CHECK-RV32-V-NEXT: vs1r.v v30, (a0) # vscale x 8-byte Folded Spill +; CHECK-RV32-V-NEXT: vs8r.v v16, (a0) # vscale x 64-byte Folded Spill ; CHECK-RV32-V-NEXT: csrr a0, vlenb ; CHECK-RV32-V-NEXT: slli a0, a0, 5 ; CHECK-RV32-V-NEXT: sub a0, s0, a0 ; CHECK-RV32-V-NEXT: addi a0, a0, -80 -; CHECK-RV32-V-NEXT: vs1r.v v31, (a0) # vscale x 8-byte Folded Spill +; CHECK-RV32-V-NEXT: vs8r.v v24, (a0) # vscale x 64-byte Folded Spill ; CHECK-RV32-V-NEXT: call otherfoo ; CHECK-RV32-V-NEXT: csrr a0, vlenb -; CHECK-RV32-V-NEXT: sub a0, s0, a0 -; CHECK-RV32-V-NEXT: addi a0, a0, -80 -; CHECK-RV32-V-NEXT: vl1r.v v0, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-V-NEXT: csrr a0, vlenb -; CHECK-RV32-V-NEXT: slli a0, a0, 1 -; CHECK-RV32-V-NEXT: sub a0, s0, a0 -; CHECK-RV32-V-NEXT: addi a0, a0, -80 -; CHECK-RV32-V-NEXT: vl1r.v v1, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-V-NEXT: csrr a0, vlenb -; CHECK-RV32-V-NEXT: slli a1, a0, 1 -; CHECK-RV32-V-NEXT: add a0, a1, a0 -; CHECK-RV32-V-NEXT: sub a0, s0, a0 -; CHECK-RV32-V-NEXT: addi a0, a0, -80 -; CHECK-RV32-V-NEXT: vl1r.v v2, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-V-NEXT: csrr a0, vlenb -; CHECK-RV32-V-NEXT: slli a0, a0, 2 -; CHECK-RV32-V-NEXT: sub a0, s0, a0 -; CHECK-RV32-V-NEXT: addi a0, a0, -80 -; CHECK-RV32-V-NEXT: vl1r.v v3, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-V-NEXT: csrr a0, vlenb -; CHECK-RV32-V-NEXT: slli a1, a0, 2 -; CHECK-RV32-V-NEXT: add a0, a1, a0 -; CHECK-RV32-V-NEXT: sub a0, s0, a0 -; CHECK-RV32-V-NEXT: addi a0, a0, -80 -; CHECK-RV32-V-NEXT: vl1r.v v4, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-V-NEXT: csrr a0, vlenb -; CHECK-RV32-V-NEXT: slli a0, a0, 1 -; CHECK-RV32-V-NEXT: mv a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 1 -; CHECK-RV32-V-NEXT: add a0, a0, a1 -; CHECK-RV32-V-NEXT: sub a0, s0, a0 -; CHECK-RV32-V-NEXT: addi a0, a0, -80 -; CHECK-RV32-V-NEXT: vl1r.v v5, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-V-NEXT: csrr a0, vlenb -; CHECK-RV32-V-NEXT: slli a1, a0, 3 -; CHECK-RV32-V-NEXT: sub a0, a1, a0 -; CHECK-RV32-V-NEXT: sub a0, s0, a0 -; CHECK-RV32-V-NEXT: addi a0, a0, -80 -; CHECK-RV32-V-NEXT: vl1r.v v6, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-V-NEXT: csrr a0, vlenb ; CHECK-RV32-V-NEXT: slli a0, a0, 3 ; CHECK-RV32-V-NEXT: sub a0, s0, a0 ; CHECK-RV32-V-NEXT: addi a0, a0, -80 -; CHECK-RV32-V-NEXT: vl1r.v v7, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-V-NEXT: csrr a0, vlenb -; CHECK-RV32-V-NEXT: slli a1, a0, 3 -; CHECK-RV32-V-NEXT: add a0, a1, a0 -; CHECK-RV32-V-NEXT: sub a0, s0, a0 -; CHECK-RV32-V-NEXT: addi a0, a0, -80 -; CHECK-RV32-V-NEXT: vl1r.v v8, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-V-NEXT: csrr a0, vlenb -; CHECK-RV32-V-NEXT: slli a0, a0, 1 -; CHECK-RV32-V-NEXT: mv a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 2 -; CHECK-RV32-V-NEXT: add a0, a0, a1 -; CHECK-RV32-V-NEXT: sub a0, s0, a0 -; CHECK-RV32-V-NEXT: addi a0, a0, -80 -; CHECK-RV32-V-NEXT: vl1r.v v9, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-V-NEXT: csrr a0, vlenb -; CHECK-RV32-V-NEXT: mv a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 1 -; CHECK-RV32-V-NEXT: add a1, a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 2 -; CHECK-RV32-V-NEXT: add a0, a0, a1 -; CHECK-RV32-V-NEXT: sub a0, s0, a0 -; CHECK-RV32-V-NEXT: addi a0, a0, -80 -; CHECK-RV32-V-NEXT: vl1r.v v10, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-V-NEXT: csrr a0, vlenb -; CHECK-RV32-V-NEXT: slli a0, a0, 2 -; CHECK-RV32-V-NEXT: mv 
a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 1 -; CHECK-RV32-V-NEXT: add a0, a0, a1 -; CHECK-RV32-V-NEXT: sub a0, s0, a0 -; CHECK-RV32-V-NEXT: addi a0, a0, -80 -; CHECK-RV32-V-NEXT: vl1r.v v11, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-V-NEXT: csrr a0, vlenb -; CHECK-RV32-V-NEXT: mv a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 2 -; CHECK-RV32-V-NEXT: add a1, a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 1 -; CHECK-RV32-V-NEXT: add a0, a0, a1 -; CHECK-RV32-V-NEXT: sub a0, s0, a0 -; CHECK-RV32-V-NEXT: addi a0, a0, -80 -; CHECK-RV32-V-NEXT: vl1r.v v12, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-V-NEXT: csrr a0, vlenb -; CHECK-RV32-V-NEXT: slli a0, a0, 1 -; CHECK-RV32-V-NEXT: mv a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 1 -; CHECK-RV32-V-NEXT: add a1, a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 1 -; CHECK-RV32-V-NEXT: add a0, a0, a1 -; CHECK-RV32-V-NEXT: sub a0, s0, a0 -; CHECK-RV32-V-NEXT: addi a0, a0, -80 -; CHECK-RV32-V-NEXT: vl1r.v v13, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-V-NEXT: csrr a0, vlenb -; CHECK-RV32-V-NEXT: slli a1, a0, 4 -; CHECK-RV32-V-NEXT: sub a0, a1, a0 -; CHECK-RV32-V-NEXT: sub a0, s0, a0 -; CHECK-RV32-V-NEXT: addi a0, a0, -80 -; CHECK-RV32-V-NEXT: vl1r.v v14, (a0) # vscale x 8-byte Folded Reload +; CHECK-RV32-V-NEXT: vl8r.v v0, (a0) # vscale x 64-byte Folded Reload ; CHECK-RV32-V-NEXT: csrr a0, vlenb ; CHECK-RV32-V-NEXT: slli a0, a0, 4 ; CHECK-RV32-V-NEXT: sub a0, s0, a0 ; CHECK-RV32-V-NEXT: addi a0, a0, -80 -; CHECK-RV32-V-NEXT: vl1r.v v15, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-V-NEXT: csrr a0, vlenb -; CHECK-RV32-V-NEXT: slli a1, a0, 4 -; CHECK-RV32-V-NEXT: add a0, a1, a0 -; CHECK-RV32-V-NEXT: sub a0, s0, a0 -; CHECK-RV32-V-NEXT: addi a0, a0, -80 -; CHECK-RV32-V-NEXT: vl1r.v v16, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-V-NEXT: csrr a0, vlenb -; CHECK-RV32-V-NEXT: slli a0, a0, 1 -; CHECK-RV32-V-NEXT: mv a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 3 -; CHECK-RV32-V-NEXT: add a0, a0, a1 -; CHECK-RV32-V-NEXT: sub a0, s0, a0 -; CHECK-RV32-V-NEXT: addi a0, a0, -80 -; CHECK-RV32-V-NEXT: vl1r.v v17, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-V-NEXT: csrr a0, vlenb -; CHECK-RV32-V-NEXT: mv a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 1 -; CHECK-RV32-V-NEXT: add a1, a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 3 -; CHECK-RV32-V-NEXT: add a0, a0, a1 -; CHECK-RV32-V-NEXT: sub a0, s0, a0 -; CHECK-RV32-V-NEXT: addi a0, a0, -80 -; CHECK-RV32-V-NEXT: vl1r.v v18, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-V-NEXT: csrr a0, vlenb -; CHECK-RV32-V-NEXT: slli a0, a0, 2 -; CHECK-RV32-V-NEXT: mv a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 2 -; CHECK-RV32-V-NEXT: add a0, a0, a1 -; CHECK-RV32-V-NEXT: sub a0, s0, a0 -; CHECK-RV32-V-NEXT: addi a0, a0, -80 -; CHECK-RV32-V-NEXT: vl1r.v v19, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-V-NEXT: csrr a0, vlenb -; CHECK-RV32-V-NEXT: mv a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 2 -; CHECK-RV32-V-NEXT: add a1, a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 2 -; CHECK-RV32-V-NEXT: add a0, a0, a1 -; CHECK-RV32-V-NEXT: sub a0, s0, a0 -; CHECK-RV32-V-NEXT: addi a0, a0, -80 -; CHECK-RV32-V-NEXT: vl1r.v v20, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-V-NEXT: csrr a0, vlenb -; CHECK-RV32-V-NEXT: slli a0, a0, 1 -; CHECK-RV32-V-NEXT: mv a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 1 -; CHECK-RV32-V-NEXT: add a1, a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 2 -; CHECK-RV32-V-NEXT: add a0, a0, a1 -; CHECK-RV32-V-NEXT: sub a0, s0, a0 -; CHECK-RV32-V-NEXT: addi a0, a0, -80 -; CHECK-RV32-V-NEXT: vl1r.v v21, (a0) # 
vscale x 8-byte Folded Reload -; CHECK-RV32-V-NEXT: csrr a0, vlenb -; CHECK-RV32-V-NEXT: mv a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 1 -; CHECK-RV32-V-NEXT: add a1, a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 1 -; CHECK-RV32-V-NEXT: add a1, a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 2 -; CHECK-RV32-V-NEXT: add a0, a0, a1 -; CHECK-RV32-V-NEXT: sub a0, s0, a0 -; CHECK-RV32-V-NEXT: addi a0, a0, -80 -; CHECK-RV32-V-NEXT: vl1r.v v22, (a0) # vscale x 8-byte Folded Reload +; CHECK-RV32-V-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload ; CHECK-RV32-V-NEXT: csrr a0, vlenb ; CHECK-RV32-V-NEXT: slli a0, a0, 3 ; CHECK-RV32-V-NEXT: mv a1, a0 @@ -6093,81 +2998,12 @@ define void @foo_fp_with_call() #2 { ; CHECK-RV32-V-NEXT: add a0, a0, a1 ; CHECK-RV32-V-NEXT: sub a0, s0, a0 ; CHECK-RV32-V-NEXT: addi a0, a0, -80 -; CHECK-RV32-V-NEXT: vl1r.v v23, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-V-NEXT: csrr a0, vlenb -; CHECK-RV32-V-NEXT: mv a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 3 -; CHECK-RV32-V-NEXT: add a1, a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 1 -; CHECK-RV32-V-NEXT: add a0, a0, a1 -; CHECK-RV32-V-NEXT: sub a0, s0, a0 -; CHECK-RV32-V-NEXT: addi a0, a0, -80 -; CHECK-RV32-V-NEXT: vl1r.v v24, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-V-NEXT: csrr a0, vlenb -; CHECK-RV32-V-NEXT: slli a0, a0, 1 -; CHECK-RV32-V-NEXT: mv a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 2 -; CHECK-RV32-V-NEXT: add a1, a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 1 -; CHECK-RV32-V-NEXT: add a0, a0, a1 -; CHECK-RV32-V-NEXT: sub a0, s0, a0 -; CHECK-RV32-V-NEXT: addi a0, a0, -80 -; CHECK-RV32-V-NEXT: vl1r.v v25, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-V-NEXT: csrr a0, vlenb -; CHECK-RV32-V-NEXT: mv a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 1 -; CHECK-RV32-V-NEXT: add a1, a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 2 -; CHECK-RV32-V-NEXT: add a1, a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 1 -; CHECK-RV32-V-NEXT: add a0, a0, a1 -; CHECK-RV32-V-NEXT: sub a0, s0, a0 -; CHECK-RV32-V-NEXT: addi a0, a0, -80 -; CHECK-RV32-V-NEXT: vl1r.v v26, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-V-NEXT: csrr a0, vlenb -; CHECK-RV32-V-NEXT: slli a0, a0, 2 -; CHECK-RV32-V-NEXT: mv a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 1 -; CHECK-RV32-V-NEXT: add a1, a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 1 -; CHECK-RV32-V-NEXT: add a0, a0, a1 -; CHECK-RV32-V-NEXT: sub a0, s0, a0 -; CHECK-RV32-V-NEXT: addi a0, a0, -80 -; CHECK-RV32-V-NEXT: vl1r.v v27, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-V-NEXT: csrr a0, vlenb -; CHECK-RV32-V-NEXT: mv a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 2 -; CHECK-RV32-V-NEXT: add a1, a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 1 -; CHECK-RV32-V-NEXT: add a1, a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 1 -; CHECK-RV32-V-NEXT: add a0, a0, a1 -; CHECK-RV32-V-NEXT: sub a0, s0, a0 -; CHECK-RV32-V-NEXT: addi a0, a0, -80 -; CHECK-RV32-V-NEXT: vl1r.v v28, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-V-NEXT: csrr a0, vlenb -; CHECK-RV32-V-NEXT: slli a0, a0, 1 -; CHECK-RV32-V-NEXT: mv a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 1 -; CHECK-RV32-V-NEXT: add a1, a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 1 -; CHECK-RV32-V-NEXT: add a1, a1, a0 -; CHECK-RV32-V-NEXT: slli a0, a0, 1 -; CHECK-RV32-V-NEXT: add a0, a0, a1 -; CHECK-RV32-V-NEXT: sub a0, s0, a0 -; CHECK-RV32-V-NEXT: addi a0, a0, -80 -; CHECK-RV32-V-NEXT: vl1r.v v29, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-V-NEXT: csrr a0, vlenb -; CHECK-RV32-V-NEXT: slli a1, a0, 5 -; CHECK-RV32-V-NEXT: sub a0, a1, a0 -; CHECK-RV32-V-NEXT: sub a0, s0, a0 -; 
CHECK-RV32-V-NEXT: addi a0, a0, -80 -; CHECK-RV32-V-NEXT: vl1r.v v30, (a0) # vscale x 8-byte Folded Reload +; CHECK-RV32-V-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload ; CHECK-RV32-V-NEXT: csrr a0, vlenb ; CHECK-RV32-V-NEXT: slli a0, a0, 5 ; CHECK-RV32-V-NEXT: sub a0, s0, a0 ; CHECK-RV32-V-NEXT: addi a0, a0, -80 -; CHECK-RV32-V-NEXT: vl1r.v v31, (a0) # vscale x 8-byte Folded Reload +; CHECK-RV32-V-NEXT: vl8r.v v24, (a0) # vscale x 64-byte Folded Reload ; CHECK-RV32-V-NEXT: addi sp, s0, -80 ; CHECK-RV32-V-NEXT: lw ra, 76(sp) # 4-byte Folded Reload ; CHECK-RV32-V-NEXT: lw t0, 72(sp) # 4-byte Folded Reload @@ -6234,172 +3070,15 @@ define void @foo_fp_with_call() #2 { ; CHECK-RV32-FV-NEXT: slli a0, a0, 5 ; CHECK-RV32-FV-NEXT: sub sp, sp, a0 ; CHECK-RV32-FV-NEXT: csrr a0, vlenb -; CHECK-RV32-FV-NEXT: sub a0, s0, a0 -; CHECK-RV32-FV-NEXT: addi a0, a0, -160 -; CHECK-RV32-FV-NEXT: vs1r.v v0, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-FV-NEXT: csrr a0, vlenb -; CHECK-RV32-FV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FV-NEXT: sub a0, s0, a0 -; CHECK-RV32-FV-NEXT: addi a0, a0, -160 -; CHECK-RV32-FV-NEXT: vs1r.v v1, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-FV-NEXT: csrr a0, vlenb -; CHECK-RV32-FV-NEXT: slli a1, a0, 1 -; CHECK-RV32-FV-NEXT: add a0, a1, a0 -; CHECK-RV32-FV-NEXT: sub a0, s0, a0 -; CHECK-RV32-FV-NEXT: addi a0, a0, -160 -; CHECK-RV32-FV-NEXT: vs1r.v v2, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-FV-NEXT: csrr a0, vlenb -; CHECK-RV32-FV-NEXT: slli a0, a0, 2 -; CHECK-RV32-FV-NEXT: sub a0, s0, a0 -; CHECK-RV32-FV-NEXT: addi a0, a0, -160 -; CHECK-RV32-FV-NEXT: vs1r.v v3, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-FV-NEXT: csrr a0, vlenb -; CHECK-RV32-FV-NEXT: slli a1, a0, 2 -; CHECK-RV32-FV-NEXT: add a0, a1, a0 -; CHECK-RV32-FV-NEXT: sub a0, s0, a0 -; CHECK-RV32-FV-NEXT: addi a0, a0, -160 -; CHECK-RV32-FV-NEXT: vs1r.v v4, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-FV-NEXT: csrr a0, vlenb -; CHECK-RV32-FV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FV-NEXT: mv a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FV-NEXT: add a0, a0, a1 -; CHECK-RV32-FV-NEXT: sub a0, s0, a0 -; CHECK-RV32-FV-NEXT: addi a0, a0, -160 -; CHECK-RV32-FV-NEXT: vs1r.v v5, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-FV-NEXT: csrr a0, vlenb -; CHECK-RV32-FV-NEXT: slli a1, a0, 3 -; CHECK-RV32-FV-NEXT: sub a0, a1, a0 -; CHECK-RV32-FV-NEXT: sub a0, s0, a0 -; CHECK-RV32-FV-NEXT: addi a0, a0, -160 -; CHECK-RV32-FV-NEXT: vs1r.v v6, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-FV-NEXT: csrr a0, vlenb ; CHECK-RV32-FV-NEXT: slli a0, a0, 3 ; CHECK-RV32-FV-NEXT: sub a0, s0, a0 ; CHECK-RV32-FV-NEXT: addi a0, a0, -160 -; CHECK-RV32-FV-NEXT: vs1r.v v7, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-FV-NEXT: csrr a0, vlenb -; CHECK-RV32-FV-NEXT: slli a1, a0, 3 -; CHECK-RV32-FV-NEXT: add a0, a1, a0 -; CHECK-RV32-FV-NEXT: sub a0, s0, a0 -; CHECK-RV32-FV-NEXT: addi a0, a0, -160 -; CHECK-RV32-FV-NEXT: vs1r.v v8, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-FV-NEXT: csrr a0, vlenb -; CHECK-RV32-FV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FV-NEXT: mv a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 2 -; CHECK-RV32-FV-NEXT: add a0, a0, a1 -; CHECK-RV32-FV-NEXT: sub a0, s0, a0 -; CHECK-RV32-FV-NEXT: addi a0, a0, -160 -; CHECK-RV32-FV-NEXT: vs1r.v v9, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-FV-NEXT: csrr a0, vlenb -; CHECK-RV32-FV-NEXT: mv a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FV-NEXT: add a1, a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 2 -; CHECK-RV32-FV-NEXT: add a0, 
a0, a1 -; CHECK-RV32-FV-NEXT: sub a0, s0, a0 -; CHECK-RV32-FV-NEXT: addi a0, a0, -160 -; CHECK-RV32-FV-NEXT: vs1r.v v10, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-FV-NEXT: csrr a0, vlenb -; CHECK-RV32-FV-NEXT: slli a0, a0, 2 -; CHECK-RV32-FV-NEXT: mv a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FV-NEXT: add a0, a0, a1 -; CHECK-RV32-FV-NEXT: sub a0, s0, a0 -; CHECK-RV32-FV-NEXT: addi a0, a0, -160 -; CHECK-RV32-FV-NEXT: vs1r.v v11, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-FV-NEXT: csrr a0, vlenb -; CHECK-RV32-FV-NEXT: mv a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 2 -; CHECK-RV32-FV-NEXT: add a1, a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FV-NEXT: add a0, a0, a1 -; CHECK-RV32-FV-NEXT: sub a0, s0, a0 -; CHECK-RV32-FV-NEXT: addi a0, a0, -160 -; CHECK-RV32-FV-NEXT: vs1r.v v12, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-FV-NEXT: csrr a0, vlenb -; CHECK-RV32-FV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FV-NEXT: mv a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FV-NEXT: add a1, a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FV-NEXT: add a0, a0, a1 -; CHECK-RV32-FV-NEXT: sub a0, s0, a0 -; CHECK-RV32-FV-NEXT: addi a0, a0, -160 -; CHECK-RV32-FV-NEXT: vs1r.v v13, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-FV-NEXT: csrr a0, vlenb -; CHECK-RV32-FV-NEXT: slli a1, a0, 4 -; CHECK-RV32-FV-NEXT: sub a0, a1, a0 -; CHECK-RV32-FV-NEXT: sub a0, s0, a0 -; CHECK-RV32-FV-NEXT: addi a0, a0, -160 -; CHECK-RV32-FV-NEXT: vs1r.v v14, (a0) # vscale x 8-byte Folded Spill +; CHECK-RV32-FV-NEXT: vs8r.v v0, (a0) # vscale x 64-byte Folded Spill ; CHECK-RV32-FV-NEXT: csrr a0, vlenb ; CHECK-RV32-FV-NEXT: slli a0, a0, 4 ; CHECK-RV32-FV-NEXT: sub a0, s0, a0 ; CHECK-RV32-FV-NEXT: addi a0, a0, -160 -; CHECK-RV32-FV-NEXT: vs1r.v v15, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-FV-NEXT: csrr a0, vlenb -; CHECK-RV32-FV-NEXT: slli a1, a0, 4 -; CHECK-RV32-FV-NEXT: add a0, a1, a0 -; CHECK-RV32-FV-NEXT: sub a0, s0, a0 -; CHECK-RV32-FV-NEXT: addi a0, a0, -160 -; CHECK-RV32-FV-NEXT: vs1r.v v16, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-FV-NEXT: csrr a0, vlenb -; CHECK-RV32-FV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FV-NEXT: mv a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 3 -; CHECK-RV32-FV-NEXT: add a0, a0, a1 -; CHECK-RV32-FV-NEXT: sub a0, s0, a0 -; CHECK-RV32-FV-NEXT: addi a0, a0, -160 -; CHECK-RV32-FV-NEXT: vs1r.v v17, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-FV-NEXT: csrr a0, vlenb -; CHECK-RV32-FV-NEXT: mv a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FV-NEXT: add a1, a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 3 -; CHECK-RV32-FV-NEXT: add a0, a0, a1 -; CHECK-RV32-FV-NEXT: sub a0, s0, a0 -; CHECK-RV32-FV-NEXT: addi a0, a0, -160 -; CHECK-RV32-FV-NEXT: vs1r.v v18, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-FV-NEXT: csrr a0, vlenb -; CHECK-RV32-FV-NEXT: slli a0, a0, 2 -; CHECK-RV32-FV-NEXT: mv a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 2 -; CHECK-RV32-FV-NEXT: add a0, a0, a1 -; CHECK-RV32-FV-NEXT: sub a0, s0, a0 -; CHECK-RV32-FV-NEXT: addi a0, a0, -160 -; CHECK-RV32-FV-NEXT: vs1r.v v19, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-FV-NEXT: csrr a0, vlenb -; CHECK-RV32-FV-NEXT: mv a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 2 -; CHECK-RV32-FV-NEXT: add a1, a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 2 -; CHECK-RV32-FV-NEXT: add a0, a0, a1 -; CHECK-RV32-FV-NEXT: sub a0, s0, a0 -; CHECK-RV32-FV-NEXT: addi a0, a0, -160 -; CHECK-RV32-FV-NEXT: vs1r.v v20, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-FV-NEXT: csrr a0, vlenb -; 
CHECK-RV32-FV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FV-NEXT: mv a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FV-NEXT: add a1, a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 2 -; CHECK-RV32-FV-NEXT: add a0, a0, a1 -; CHECK-RV32-FV-NEXT: sub a0, s0, a0 -; CHECK-RV32-FV-NEXT: addi a0, a0, -160 -; CHECK-RV32-FV-NEXT: vs1r.v v21, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-FV-NEXT: csrr a0, vlenb -; CHECK-RV32-FV-NEXT: mv a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FV-NEXT: add a1, a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FV-NEXT: add a1, a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 2 -; CHECK-RV32-FV-NEXT: add a0, a0, a1 -; CHECK-RV32-FV-NEXT: sub a0, s0, a0 -; CHECK-RV32-FV-NEXT: addi a0, a0, -160 -; CHECK-RV32-FV-NEXT: vs1r.v v22, (a0) # vscale x 8-byte Folded Spill +; CHECK-RV32-FV-NEXT: vs8r.v v8, (a0) # vscale x 64-byte Folded Spill ; CHECK-RV32-FV-NEXT: csrr a0, vlenb ; CHECK-RV32-FV-NEXT: slli a0, a0, 3 ; CHECK-RV32-FV-NEXT: mv a1, a0 @@ -6407,331 +3086,36 @@ define void @foo_fp_with_call() #2 { ; CHECK-RV32-FV-NEXT: add a0, a0, a1 ; CHECK-RV32-FV-NEXT: sub a0, s0, a0 ; CHECK-RV32-FV-NEXT: addi a0, a0, -160 -; CHECK-RV32-FV-NEXT: vs1r.v v23, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-FV-NEXT: csrr a0, vlenb -; CHECK-RV32-FV-NEXT: mv a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 3 -; CHECK-RV32-FV-NEXT: add a1, a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FV-NEXT: add a0, a0, a1 -; CHECK-RV32-FV-NEXT: sub a0, s0, a0 -; CHECK-RV32-FV-NEXT: addi a0, a0, -160 -; CHECK-RV32-FV-NEXT: vs1r.v v24, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-FV-NEXT: csrr a0, vlenb -; CHECK-RV32-FV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FV-NEXT: mv a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 2 -; CHECK-RV32-FV-NEXT: add a1, a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FV-NEXT: add a0, a0, a1 -; CHECK-RV32-FV-NEXT: sub a0, s0, a0 -; CHECK-RV32-FV-NEXT: addi a0, a0, -160 -; CHECK-RV32-FV-NEXT: vs1r.v v25, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-FV-NEXT: csrr a0, vlenb -; CHECK-RV32-FV-NEXT: mv a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FV-NEXT: add a1, a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 2 -; CHECK-RV32-FV-NEXT: add a1, a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FV-NEXT: add a0, a0, a1 -; CHECK-RV32-FV-NEXT: sub a0, s0, a0 -; CHECK-RV32-FV-NEXT: addi a0, a0, -160 -; CHECK-RV32-FV-NEXT: vs1r.v v26, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-FV-NEXT: csrr a0, vlenb -; CHECK-RV32-FV-NEXT: slli a0, a0, 2 -; CHECK-RV32-FV-NEXT: mv a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FV-NEXT: add a1, a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FV-NEXT: add a0, a0, a1 -; CHECK-RV32-FV-NEXT: sub a0, s0, a0 -; CHECK-RV32-FV-NEXT: addi a0, a0, -160 -; CHECK-RV32-FV-NEXT: vs1r.v v27, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-FV-NEXT: csrr a0, vlenb -; CHECK-RV32-FV-NEXT: mv a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 2 -; CHECK-RV32-FV-NEXT: add a1, a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FV-NEXT: add a1, a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FV-NEXT: add a0, a0, a1 -; CHECK-RV32-FV-NEXT: sub a0, s0, a0 -; CHECK-RV32-FV-NEXT: addi a0, a0, -160 -; CHECK-RV32-FV-NEXT: vs1r.v v28, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-FV-NEXT: csrr a0, vlenb -; CHECK-RV32-FV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FV-NEXT: mv a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FV-NEXT: add a1, a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, 
a0, 1 -; CHECK-RV32-FV-NEXT: add a1, a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FV-NEXT: add a0, a0, a1 -; CHECK-RV32-FV-NEXT: sub a0, s0, a0 -; CHECK-RV32-FV-NEXT: addi a0, a0, -160 -; CHECK-RV32-FV-NEXT: vs1r.v v29, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-FV-NEXT: csrr a0, vlenb -; CHECK-RV32-FV-NEXT: slli a1, a0, 5 -; CHECK-RV32-FV-NEXT: sub a0, a1, a0 -; CHECK-RV32-FV-NEXT: sub a0, s0, a0 -; CHECK-RV32-FV-NEXT: addi a0, a0, -160 -; CHECK-RV32-FV-NEXT: vs1r.v v30, (a0) # vscale x 8-byte Folded Spill +; CHECK-RV32-FV-NEXT: vs8r.v v16, (a0) # vscale x 64-byte Folded Spill ; CHECK-RV32-FV-NEXT: csrr a0, vlenb ; CHECK-RV32-FV-NEXT: slli a0, a0, 5 ; CHECK-RV32-FV-NEXT: sub a0, s0, a0 ; CHECK-RV32-FV-NEXT: addi a0, a0, -160 -; CHECK-RV32-FV-NEXT: vs1r.v v31, (a0) # vscale x 8-byte Folded Spill +; CHECK-RV32-FV-NEXT: vs8r.v v24, (a0) # vscale x 64-byte Folded Spill ; CHECK-RV32-FV-NEXT: call otherfoo ; CHECK-RV32-FV-NEXT: csrr a0, vlenb -; CHECK-RV32-FV-NEXT: sub a0, s0, a0 -; CHECK-RV32-FV-NEXT: addi a0, a0, -160 -; CHECK-RV32-FV-NEXT: vl1r.v v0, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-FV-NEXT: csrr a0, vlenb -; CHECK-RV32-FV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FV-NEXT: sub a0, s0, a0 -; CHECK-RV32-FV-NEXT: addi a0, a0, -160 -; CHECK-RV32-FV-NEXT: vl1r.v v1, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-FV-NEXT: csrr a0, vlenb -; CHECK-RV32-FV-NEXT: slli a1, a0, 1 -; CHECK-RV32-FV-NEXT: add a0, a1, a0 -; CHECK-RV32-FV-NEXT: sub a0, s0, a0 -; CHECK-RV32-FV-NEXT: addi a0, a0, -160 -; CHECK-RV32-FV-NEXT: vl1r.v v2, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-FV-NEXT: csrr a0, vlenb -; CHECK-RV32-FV-NEXT: slli a0, a0, 2 -; CHECK-RV32-FV-NEXT: sub a0, s0, a0 -; CHECK-RV32-FV-NEXT: addi a0, a0, -160 -; CHECK-RV32-FV-NEXT: vl1r.v v3, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-FV-NEXT: csrr a0, vlenb -; CHECK-RV32-FV-NEXT: slli a1, a0, 2 -; CHECK-RV32-FV-NEXT: add a0, a1, a0 -; CHECK-RV32-FV-NEXT: sub a0, s0, a0 -; CHECK-RV32-FV-NEXT: addi a0, a0, -160 -; CHECK-RV32-FV-NEXT: vl1r.v v4, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-FV-NEXT: csrr a0, vlenb -; CHECK-RV32-FV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FV-NEXT: mv a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FV-NEXT: add a0, a0, a1 -; CHECK-RV32-FV-NEXT: sub a0, s0, a0 -; CHECK-RV32-FV-NEXT: addi a0, a0, -160 -; CHECK-RV32-FV-NEXT: vl1r.v v5, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-FV-NEXT: csrr a0, vlenb -; CHECK-RV32-FV-NEXT: slli a1, a0, 3 -; CHECK-RV32-FV-NEXT: sub a0, a1, a0 -; CHECK-RV32-FV-NEXT: sub a0, s0, a0 -; CHECK-RV32-FV-NEXT: addi a0, a0, -160 -; CHECK-RV32-FV-NEXT: vl1r.v v6, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-FV-NEXT: csrr a0, vlenb ; CHECK-RV32-FV-NEXT: slli a0, a0, 3 ; CHECK-RV32-FV-NEXT: sub a0, s0, a0 ; CHECK-RV32-FV-NEXT: addi a0, a0, -160 -; CHECK-RV32-FV-NEXT: vl1r.v v7, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-FV-NEXT: csrr a0, vlenb -; CHECK-RV32-FV-NEXT: slli a1, a0, 3 -; CHECK-RV32-FV-NEXT: add a0, a1, a0 -; CHECK-RV32-FV-NEXT: sub a0, s0, a0 -; CHECK-RV32-FV-NEXT: addi a0, a0, -160 -; CHECK-RV32-FV-NEXT: vl1r.v v8, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-FV-NEXT: csrr a0, vlenb -; CHECK-RV32-FV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FV-NEXT: mv a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 2 -; CHECK-RV32-FV-NEXT: add a0, a0, a1 -; CHECK-RV32-FV-NEXT: sub a0, s0, a0 -; CHECK-RV32-FV-NEXT: addi a0, a0, -160 -; CHECK-RV32-FV-NEXT: vl1r.v v9, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-FV-NEXT: csrr 
a0, vlenb -; CHECK-RV32-FV-NEXT: mv a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FV-NEXT: add a1, a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 2 -; CHECK-RV32-FV-NEXT: add a0, a0, a1 -; CHECK-RV32-FV-NEXT: sub a0, s0, a0 -; CHECK-RV32-FV-NEXT: addi a0, a0, -160 -; CHECK-RV32-FV-NEXT: vl1r.v v10, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-FV-NEXT: csrr a0, vlenb -; CHECK-RV32-FV-NEXT: slli a0, a0, 2 -; CHECK-RV32-FV-NEXT: mv a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FV-NEXT: add a0, a0, a1 -; CHECK-RV32-FV-NEXT: sub a0, s0, a0 -; CHECK-RV32-FV-NEXT: addi a0, a0, -160 -; CHECK-RV32-FV-NEXT: vl1r.v v11, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-FV-NEXT: csrr a0, vlenb -; CHECK-RV32-FV-NEXT: mv a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 2 -; CHECK-RV32-FV-NEXT: add a1, a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FV-NEXT: add a0, a0, a1 -; CHECK-RV32-FV-NEXT: sub a0, s0, a0 -; CHECK-RV32-FV-NEXT: addi a0, a0, -160 -; CHECK-RV32-FV-NEXT: vl1r.v v12, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-FV-NEXT: csrr a0, vlenb -; CHECK-RV32-FV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FV-NEXT: mv a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FV-NEXT: add a1, a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FV-NEXT: add a0, a0, a1 -; CHECK-RV32-FV-NEXT: sub a0, s0, a0 -; CHECK-RV32-FV-NEXT: addi a0, a0, -160 -; CHECK-RV32-FV-NEXT: vl1r.v v13, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-FV-NEXT: csrr a0, vlenb -; CHECK-RV32-FV-NEXT: slli a1, a0, 4 -; CHECK-RV32-FV-NEXT: sub a0, a1, a0 -; CHECK-RV32-FV-NEXT: sub a0, s0, a0 -; CHECK-RV32-FV-NEXT: addi a0, a0, -160 -; CHECK-RV32-FV-NEXT: vl1r.v v14, (a0) # vscale x 8-byte Folded Reload +; CHECK-RV32-FV-NEXT: vl8r.v v0, (a0) # vscale x 64-byte Folded Reload ; CHECK-RV32-FV-NEXT: csrr a0, vlenb ; CHECK-RV32-FV-NEXT: slli a0, a0, 4 ; CHECK-RV32-FV-NEXT: sub a0, s0, a0 ; CHECK-RV32-FV-NEXT: addi a0, a0, -160 -; CHECK-RV32-FV-NEXT: vl1r.v v15, (a0) # vscale x 8-byte Folded Reload +; CHECK-RV32-FV-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload ; CHECK-RV32-FV-NEXT: csrr a0, vlenb -; CHECK-RV32-FV-NEXT: slli a1, a0, 4 -; CHECK-RV32-FV-NEXT: add a0, a1, a0 -; CHECK-RV32-FV-NEXT: sub a0, s0, a0 -; CHECK-RV32-FV-NEXT: addi a0, a0, -160 -; CHECK-RV32-FV-NEXT: vl1r.v v16, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-FV-NEXT: csrr a0, vlenb -; CHECK-RV32-FV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FV-NEXT: mv a1, a0 ; CHECK-RV32-FV-NEXT: slli a0, a0, 3 -; CHECK-RV32-FV-NEXT: add a0, a0, a1 -; CHECK-RV32-FV-NEXT: sub a0, s0, a0 -; CHECK-RV32-FV-NEXT: addi a0, a0, -160 -; CHECK-RV32-FV-NEXT: vl1r.v v17, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-FV-NEXT: csrr a0, vlenb -; CHECK-RV32-FV-NEXT: mv a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FV-NEXT: add a1, a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 3 -; CHECK-RV32-FV-NEXT: add a0, a0, a1 -; CHECK-RV32-FV-NEXT: sub a0, s0, a0 -; CHECK-RV32-FV-NEXT: addi a0, a0, -160 -; CHECK-RV32-FV-NEXT: vl1r.v v18, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-FV-NEXT: csrr a0, vlenb -; CHECK-RV32-FV-NEXT: slli a0, a0, 2 -; CHECK-RV32-FV-NEXT: mv a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 2 -; CHECK-RV32-FV-NEXT: add a0, a0, a1 -; CHECK-RV32-FV-NEXT: sub a0, s0, a0 -; CHECK-RV32-FV-NEXT: addi a0, a0, -160 -; CHECK-RV32-FV-NEXT: vl1r.v v19, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-FV-NEXT: csrr a0, vlenb -; CHECK-RV32-FV-NEXT: mv a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 2 -; CHECK-RV32-FV-NEXT: add a1, a1, 
a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 2 -; CHECK-RV32-FV-NEXT: add a0, a0, a1 -; CHECK-RV32-FV-NEXT: sub a0, s0, a0 -; CHECK-RV32-FV-NEXT: addi a0, a0, -160 -; CHECK-RV32-FV-NEXT: vl1r.v v20, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-FV-NEXT: csrr a0, vlenb -; CHECK-RV32-FV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FV-NEXT: mv a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FV-NEXT: add a1, a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 2 -; CHECK-RV32-FV-NEXT: add a0, a0, a1 -; CHECK-RV32-FV-NEXT: sub a0, s0, a0 -; CHECK-RV32-FV-NEXT: addi a0, a0, -160 -; CHECK-RV32-FV-NEXT: vl1r.v v21, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-FV-NEXT: csrr a0, vlenb -; CHECK-RV32-FV-NEXT: mv a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FV-NEXT: add a1, a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FV-NEXT: add a1, a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 2 -; CHECK-RV32-FV-NEXT: add a0, a0, a1 -; CHECK-RV32-FV-NEXT: sub a0, s0, a0 -; CHECK-RV32-FV-NEXT: addi a0, a0, -160 -; CHECK-RV32-FV-NEXT: vl1r.v v22, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-FV-NEXT: csrr a0, vlenb -; CHECK-RV32-FV-NEXT: slli a0, a0, 3 -; CHECK-RV32-FV-NEXT: mv a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FV-NEXT: add a0, a0, a1 -; CHECK-RV32-FV-NEXT: sub a0, s0, a0 -; CHECK-RV32-FV-NEXT: addi a0, a0, -160 -; CHECK-RV32-FV-NEXT: vl1r.v v23, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-FV-NEXT: csrr a0, vlenb -; CHECK-RV32-FV-NEXT: mv a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 3 -; CHECK-RV32-FV-NEXT: add a1, a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FV-NEXT: add a0, a0, a1 -; CHECK-RV32-FV-NEXT: sub a0, s0, a0 -; CHECK-RV32-FV-NEXT: addi a0, a0, -160 -; CHECK-RV32-FV-NEXT: vl1r.v v24, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-FV-NEXT: csrr a0, vlenb -; CHECK-RV32-FV-NEXT: slli a0, a0, 1 ; CHECK-RV32-FV-NEXT: mv a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 2 -; CHECK-RV32-FV-NEXT: add a1, a1, a0 ; CHECK-RV32-FV-NEXT: slli a0, a0, 1 ; CHECK-RV32-FV-NEXT: add a0, a0, a1 ; CHECK-RV32-FV-NEXT: sub a0, s0, a0 ; CHECK-RV32-FV-NEXT: addi a0, a0, -160 -; CHECK-RV32-FV-NEXT: vl1r.v v25, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-FV-NEXT: csrr a0, vlenb -; CHECK-RV32-FV-NEXT: mv a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FV-NEXT: add a1, a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 2 -; CHECK-RV32-FV-NEXT: add a1, a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FV-NEXT: add a0, a0, a1 -; CHECK-RV32-FV-NEXT: sub a0, s0, a0 -; CHECK-RV32-FV-NEXT: addi a0, a0, -160 -; CHECK-RV32-FV-NEXT: vl1r.v v26, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-FV-NEXT: csrr a0, vlenb -; CHECK-RV32-FV-NEXT: slli a0, a0, 2 -; CHECK-RV32-FV-NEXT: mv a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FV-NEXT: add a1, a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FV-NEXT: add a0, a0, a1 -; CHECK-RV32-FV-NEXT: sub a0, s0, a0 -; CHECK-RV32-FV-NEXT: addi a0, a0, -160 -; CHECK-RV32-FV-NEXT: vl1r.v v27, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-FV-NEXT: csrr a0, vlenb -; CHECK-RV32-FV-NEXT: mv a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 2 -; CHECK-RV32-FV-NEXT: add a1, a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FV-NEXT: add a1, a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FV-NEXT: add a0, a0, a1 -; CHECK-RV32-FV-NEXT: sub a0, s0, a0 -; CHECK-RV32-FV-NEXT: addi a0, a0, -160 -; CHECK-RV32-FV-NEXT: vl1r.v v28, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-FV-NEXT: csrr a0, 
vlenb -; CHECK-RV32-FV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FV-NEXT: mv a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FV-NEXT: add a1, a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FV-NEXT: add a1, a1, a0 -; CHECK-RV32-FV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FV-NEXT: add a0, a0, a1 -; CHECK-RV32-FV-NEXT: sub a0, s0, a0 -; CHECK-RV32-FV-NEXT: addi a0, a0, -160 -; CHECK-RV32-FV-NEXT: vl1r.v v29, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-FV-NEXT: csrr a0, vlenb -; CHECK-RV32-FV-NEXT: slli a1, a0, 5 -; CHECK-RV32-FV-NEXT: sub a0, a1, a0 -; CHECK-RV32-FV-NEXT: sub a0, s0, a0 -; CHECK-RV32-FV-NEXT: addi a0, a0, -160 -; CHECK-RV32-FV-NEXT: vl1r.v v30, (a0) # vscale x 8-byte Folded Reload +; CHECK-RV32-FV-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload ; CHECK-RV32-FV-NEXT: csrr a0, vlenb ; CHECK-RV32-FV-NEXT: slli a0, a0, 5 ; CHECK-RV32-FV-NEXT: sub a0, s0, a0 ; CHECK-RV32-FV-NEXT: addi a0, a0, -160 -; CHECK-RV32-FV-NEXT: vl1r.v v31, (a0) # vscale x 8-byte Folded Reload +; CHECK-RV32-FV-NEXT: vl8r.v v24, (a0) # vscale x 64-byte Folded Reload ; CHECK-RV32-FV-NEXT: addi sp, s0, -160 ; CHECK-RV32-FV-NEXT: lw ra, 156(sp) # 4-byte Folded Reload ; CHECK-RV32-FV-NEXT: lw t0, 152(sp) # 4-byte Folded Reload @@ -6818,172 +3202,15 @@ define void @foo_fp_with_call() #2 { ; CHECK-RV32-FDV-NEXT: slli a0, a0, 5 ; CHECK-RV32-FDV-NEXT: sub sp, sp, a0 ; CHECK-RV32-FDV-NEXT: csrr a0, vlenb -; CHECK-RV32-FDV-NEXT: sub a0, s0, a0 -; CHECK-RV32-FDV-NEXT: addi a0, a0, -240 -; CHECK-RV32-FDV-NEXT: vs1r.v v0, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-FDV-NEXT: csrr a0, vlenb -; CHECK-RV32-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FDV-NEXT: sub a0, s0, a0 -; CHECK-RV32-FDV-NEXT: addi a0, a0, -240 -; CHECK-RV32-FDV-NEXT: vs1r.v v1, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-FDV-NEXT: csrr a0, vlenb -; CHECK-RV32-FDV-NEXT: slli a1, a0, 1 -; CHECK-RV32-FDV-NEXT: add a0, a1, a0 -; CHECK-RV32-FDV-NEXT: sub a0, s0, a0 -; CHECK-RV32-FDV-NEXT: addi a0, a0, -240 -; CHECK-RV32-FDV-NEXT: vs1r.v v2, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-FDV-NEXT: csrr a0, vlenb -; CHECK-RV32-FDV-NEXT: slli a0, a0, 2 -; CHECK-RV32-FDV-NEXT: sub a0, s0, a0 -; CHECK-RV32-FDV-NEXT: addi a0, a0, -240 -; CHECK-RV32-FDV-NEXT: vs1r.v v3, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-FDV-NEXT: csrr a0, vlenb -; CHECK-RV32-FDV-NEXT: slli a1, a0, 2 -; CHECK-RV32-FDV-NEXT: add a0, a1, a0 -; CHECK-RV32-FDV-NEXT: sub a0, s0, a0 -; CHECK-RV32-FDV-NEXT: addi a0, a0, -240 -; CHECK-RV32-FDV-NEXT: vs1r.v v4, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-FDV-NEXT: csrr a0, vlenb -; CHECK-RV32-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FDV-NEXT: mv a1, a0 -; CHECK-RV32-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FDV-NEXT: add a0, a0, a1 -; CHECK-RV32-FDV-NEXT: sub a0, s0, a0 -; CHECK-RV32-FDV-NEXT: addi a0, a0, -240 -; CHECK-RV32-FDV-NEXT: vs1r.v v5, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-FDV-NEXT: csrr a0, vlenb -; CHECK-RV32-FDV-NEXT: slli a1, a0, 3 -; CHECK-RV32-FDV-NEXT: sub a0, a1, a0 -; CHECK-RV32-FDV-NEXT: sub a0, s0, a0 -; CHECK-RV32-FDV-NEXT: addi a0, a0, -240 -; CHECK-RV32-FDV-NEXT: vs1r.v v6, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-FDV-NEXT: csrr a0, vlenb ; CHECK-RV32-FDV-NEXT: slli a0, a0, 3 ; CHECK-RV32-FDV-NEXT: sub a0, s0, a0 ; CHECK-RV32-FDV-NEXT: addi a0, a0, -240 -; CHECK-RV32-FDV-NEXT: vs1r.v v7, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-FDV-NEXT: csrr a0, vlenb -; CHECK-RV32-FDV-NEXT: slli a1, a0, 3 -; CHECK-RV32-FDV-NEXT: add a0, a1, a0 -; CHECK-RV32-FDV-NEXT: 
sub a0, s0, a0 -; CHECK-RV32-FDV-NEXT: addi a0, a0, -240 -; CHECK-RV32-FDV-NEXT: vs1r.v v8, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-FDV-NEXT: csrr a0, vlenb -; CHECK-RV32-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FDV-NEXT: mv a1, a0 -; CHECK-RV32-FDV-NEXT: slli a0, a0, 2 -; CHECK-RV32-FDV-NEXT: add a0, a0, a1 -; CHECK-RV32-FDV-NEXT: sub a0, s0, a0 -; CHECK-RV32-FDV-NEXT: addi a0, a0, -240 -; CHECK-RV32-FDV-NEXT: vs1r.v v9, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-FDV-NEXT: csrr a0, vlenb -; CHECK-RV32-FDV-NEXT: mv a1, a0 -; CHECK-RV32-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FDV-NEXT: add a1, a1, a0 -; CHECK-RV32-FDV-NEXT: slli a0, a0, 2 -; CHECK-RV32-FDV-NEXT: add a0, a0, a1 -; CHECK-RV32-FDV-NEXT: sub a0, s0, a0 -; CHECK-RV32-FDV-NEXT: addi a0, a0, -240 -; CHECK-RV32-FDV-NEXT: vs1r.v v10, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-FDV-NEXT: csrr a0, vlenb -; CHECK-RV32-FDV-NEXT: slli a0, a0, 2 -; CHECK-RV32-FDV-NEXT: mv a1, a0 -; CHECK-RV32-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FDV-NEXT: add a0, a0, a1 -; CHECK-RV32-FDV-NEXT: sub a0, s0, a0 -; CHECK-RV32-FDV-NEXT: addi a0, a0, -240 -; CHECK-RV32-FDV-NEXT: vs1r.v v11, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-FDV-NEXT: csrr a0, vlenb -; CHECK-RV32-FDV-NEXT: mv a1, a0 -; CHECK-RV32-FDV-NEXT: slli a0, a0, 2 -; CHECK-RV32-FDV-NEXT: add a1, a1, a0 -; CHECK-RV32-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FDV-NEXT: add a0, a0, a1 -; CHECK-RV32-FDV-NEXT: sub a0, s0, a0 -; CHECK-RV32-FDV-NEXT: addi a0, a0, -240 -; CHECK-RV32-FDV-NEXT: vs1r.v v12, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-FDV-NEXT: csrr a0, vlenb -; CHECK-RV32-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FDV-NEXT: mv a1, a0 -; CHECK-RV32-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FDV-NEXT: add a1, a1, a0 -; CHECK-RV32-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FDV-NEXT: add a0, a0, a1 -; CHECK-RV32-FDV-NEXT: sub a0, s0, a0 -; CHECK-RV32-FDV-NEXT: addi a0, a0, -240 -; CHECK-RV32-FDV-NEXT: vs1r.v v13, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-FDV-NEXT: csrr a0, vlenb -; CHECK-RV32-FDV-NEXT: slli a1, a0, 4 -; CHECK-RV32-FDV-NEXT: sub a0, a1, a0 -; CHECK-RV32-FDV-NEXT: sub a0, s0, a0 -; CHECK-RV32-FDV-NEXT: addi a0, a0, -240 -; CHECK-RV32-FDV-NEXT: vs1r.v v14, (a0) # vscale x 8-byte Folded Spill +; CHECK-RV32-FDV-NEXT: vs8r.v v0, (a0) # vscale x 64-byte Folded Spill ; CHECK-RV32-FDV-NEXT: csrr a0, vlenb ; CHECK-RV32-FDV-NEXT: slli a0, a0, 4 ; CHECK-RV32-FDV-NEXT: sub a0, s0, a0 ; CHECK-RV32-FDV-NEXT: addi a0, a0, -240 -; CHECK-RV32-FDV-NEXT: vs1r.v v15, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-FDV-NEXT: csrr a0, vlenb -; CHECK-RV32-FDV-NEXT: slli a1, a0, 4 -; CHECK-RV32-FDV-NEXT: add a0, a1, a0 -; CHECK-RV32-FDV-NEXT: sub a0, s0, a0 -; CHECK-RV32-FDV-NEXT: addi a0, a0, -240 -; CHECK-RV32-FDV-NEXT: vs1r.v v16, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-FDV-NEXT: csrr a0, vlenb -; CHECK-RV32-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FDV-NEXT: mv a1, a0 -; CHECK-RV32-FDV-NEXT: slli a0, a0, 3 -; CHECK-RV32-FDV-NEXT: add a0, a0, a1 -; CHECK-RV32-FDV-NEXT: sub a0, s0, a0 -; CHECK-RV32-FDV-NEXT: addi a0, a0, -240 -; CHECK-RV32-FDV-NEXT: vs1r.v v17, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-FDV-NEXT: csrr a0, vlenb -; CHECK-RV32-FDV-NEXT: mv a1, a0 -; CHECK-RV32-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FDV-NEXT: add a1, a1, a0 -; CHECK-RV32-FDV-NEXT: slli a0, a0, 3 -; CHECK-RV32-FDV-NEXT: add a0, a0, a1 -; CHECK-RV32-FDV-NEXT: sub a0, s0, a0 -; CHECK-RV32-FDV-NEXT: addi a0, a0, -240 -; CHECK-RV32-FDV-NEXT: vs1r.v v18, (a0) # vscale x 8-byte 
Folded Spill -; CHECK-RV32-FDV-NEXT: csrr a0, vlenb -; CHECK-RV32-FDV-NEXT: slli a0, a0, 2 -; CHECK-RV32-FDV-NEXT: mv a1, a0 -; CHECK-RV32-FDV-NEXT: slli a0, a0, 2 -; CHECK-RV32-FDV-NEXT: add a0, a0, a1 -; CHECK-RV32-FDV-NEXT: sub a0, s0, a0 -; CHECK-RV32-FDV-NEXT: addi a0, a0, -240 -; CHECK-RV32-FDV-NEXT: vs1r.v v19, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-FDV-NEXT: csrr a0, vlenb -; CHECK-RV32-FDV-NEXT: mv a1, a0 -; CHECK-RV32-FDV-NEXT: slli a0, a0, 2 -; CHECK-RV32-FDV-NEXT: add a1, a1, a0 -; CHECK-RV32-FDV-NEXT: slli a0, a0, 2 -; CHECK-RV32-FDV-NEXT: add a0, a0, a1 -; CHECK-RV32-FDV-NEXT: sub a0, s0, a0 -; CHECK-RV32-FDV-NEXT: addi a0, a0, -240 -; CHECK-RV32-FDV-NEXT: vs1r.v v20, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-FDV-NEXT: csrr a0, vlenb -; CHECK-RV32-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FDV-NEXT: mv a1, a0 -; CHECK-RV32-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FDV-NEXT: add a1, a1, a0 -; CHECK-RV32-FDV-NEXT: slli a0, a0, 2 -; CHECK-RV32-FDV-NEXT: add a0, a0, a1 -; CHECK-RV32-FDV-NEXT: sub a0, s0, a0 -; CHECK-RV32-FDV-NEXT: addi a0, a0, -240 -; CHECK-RV32-FDV-NEXT: vs1r.v v21, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-FDV-NEXT: csrr a0, vlenb -; CHECK-RV32-FDV-NEXT: mv a1, a0 -; CHECK-RV32-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FDV-NEXT: add a1, a1, a0 -; CHECK-RV32-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FDV-NEXT: add a1, a1, a0 -; CHECK-RV32-FDV-NEXT: slli a0, a0, 2 -; CHECK-RV32-FDV-NEXT: add a0, a0, a1 -; CHECK-RV32-FDV-NEXT: sub a0, s0, a0 -; CHECK-RV32-FDV-NEXT: addi a0, a0, -240 -; CHECK-RV32-FDV-NEXT: vs1r.v v22, (a0) # vscale x 8-byte Folded Spill +; CHECK-RV32-FDV-NEXT: vs8r.v v8, (a0) # vscale x 64-byte Folded Spill ; CHECK-RV32-FDV-NEXT: csrr a0, vlenb ; CHECK-RV32-FDV-NEXT: slli a0, a0, 3 ; CHECK-RV32-FDV-NEXT: mv a1, a0 @@ -6991,249 +3218,23 @@ define void @foo_fp_with_call() #2 { ; CHECK-RV32-FDV-NEXT: add a0, a0, a1 ; CHECK-RV32-FDV-NEXT: sub a0, s0, a0 ; CHECK-RV32-FDV-NEXT: addi a0, a0, -240 -; CHECK-RV32-FDV-NEXT: vs1r.v v23, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-FDV-NEXT: csrr a0, vlenb -; CHECK-RV32-FDV-NEXT: mv a1, a0 -; CHECK-RV32-FDV-NEXT: slli a0, a0, 3 -; CHECK-RV32-FDV-NEXT: add a1, a1, a0 -; CHECK-RV32-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FDV-NEXT: add a0, a0, a1 -; CHECK-RV32-FDV-NEXT: sub a0, s0, a0 -; CHECK-RV32-FDV-NEXT: addi a0, a0, -240 -; CHECK-RV32-FDV-NEXT: vs1r.v v24, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-FDV-NEXT: csrr a0, vlenb -; CHECK-RV32-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FDV-NEXT: mv a1, a0 -; CHECK-RV32-FDV-NEXT: slli a0, a0, 2 -; CHECK-RV32-FDV-NEXT: add a1, a1, a0 -; CHECK-RV32-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FDV-NEXT: add a0, a0, a1 -; CHECK-RV32-FDV-NEXT: sub a0, s0, a0 -; CHECK-RV32-FDV-NEXT: addi a0, a0, -240 -; CHECK-RV32-FDV-NEXT: vs1r.v v25, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-FDV-NEXT: csrr a0, vlenb -; CHECK-RV32-FDV-NEXT: mv a1, a0 -; CHECK-RV32-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FDV-NEXT: add a1, a1, a0 -; CHECK-RV32-FDV-NEXT: slli a0, a0, 2 -; CHECK-RV32-FDV-NEXT: add a1, a1, a0 -; CHECK-RV32-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FDV-NEXT: add a0, a0, a1 -; CHECK-RV32-FDV-NEXT: sub a0, s0, a0 -; CHECK-RV32-FDV-NEXT: addi a0, a0, -240 -; CHECK-RV32-FDV-NEXT: vs1r.v v26, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-FDV-NEXT: csrr a0, vlenb -; CHECK-RV32-FDV-NEXT: slli a0, a0, 2 -; CHECK-RV32-FDV-NEXT: mv a1, a0 -; CHECK-RV32-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FDV-NEXT: add a1, a1, a0 -; CHECK-RV32-FDV-NEXT: slli a0, a0, 1 -; 
CHECK-RV32-FDV-NEXT: add a0, a0, a1 -; CHECK-RV32-FDV-NEXT: sub a0, s0, a0 -; CHECK-RV32-FDV-NEXT: addi a0, a0, -240 -; CHECK-RV32-FDV-NEXT: vs1r.v v27, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-FDV-NEXT: csrr a0, vlenb -; CHECK-RV32-FDV-NEXT: mv a1, a0 -; CHECK-RV32-FDV-NEXT: slli a0, a0, 2 -; CHECK-RV32-FDV-NEXT: add a1, a1, a0 -; CHECK-RV32-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FDV-NEXT: add a1, a1, a0 -; CHECK-RV32-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FDV-NEXT: add a0, a0, a1 -; CHECK-RV32-FDV-NEXT: sub a0, s0, a0 -; CHECK-RV32-FDV-NEXT: addi a0, a0, -240 -; CHECK-RV32-FDV-NEXT: vs1r.v v28, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-FDV-NEXT: csrr a0, vlenb -; CHECK-RV32-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FDV-NEXT: mv a1, a0 -; CHECK-RV32-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FDV-NEXT: add a1, a1, a0 -; CHECK-RV32-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FDV-NEXT: add a1, a1, a0 -; CHECK-RV32-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FDV-NEXT: add a0, a0, a1 -; CHECK-RV32-FDV-NEXT: sub a0, s0, a0 -; CHECK-RV32-FDV-NEXT: addi a0, a0, -240 -; CHECK-RV32-FDV-NEXT: vs1r.v v29, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-FDV-NEXT: csrr a0, vlenb -; CHECK-RV32-FDV-NEXT: slli a1, a0, 5 -; CHECK-RV32-FDV-NEXT: sub a0, a1, a0 -; CHECK-RV32-FDV-NEXT: sub a0, s0, a0 -; CHECK-RV32-FDV-NEXT: addi a0, a0, -240 -; CHECK-RV32-FDV-NEXT: vs1r.v v30, (a0) # vscale x 8-byte Folded Spill +; CHECK-RV32-FDV-NEXT: vs8r.v v16, (a0) # vscale x 64-byte Folded Spill ; CHECK-RV32-FDV-NEXT: csrr a0, vlenb ; CHECK-RV32-FDV-NEXT: slli a0, a0, 5 ; CHECK-RV32-FDV-NEXT: sub a0, s0, a0 ; CHECK-RV32-FDV-NEXT: addi a0, a0, -240 -; CHECK-RV32-FDV-NEXT: vs1r.v v31, (a0) # vscale x 8-byte Folded Spill +; CHECK-RV32-FDV-NEXT: vs8r.v v24, (a0) # vscale x 64-byte Folded Spill ; CHECK-RV32-FDV-NEXT: call otherfoo ; CHECK-RV32-FDV-NEXT: csrr a0, vlenb -; CHECK-RV32-FDV-NEXT: sub a0, s0, a0 -; CHECK-RV32-FDV-NEXT: addi a0, a0, -240 -; CHECK-RV32-FDV-NEXT: vl1r.v v0, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-FDV-NEXT: csrr a0, vlenb -; CHECK-RV32-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FDV-NEXT: sub a0, s0, a0 -; CHECK-RV32-FDV-NEXT: addi a0, a0, -240 -; CHECK-RV32-FDV-NEXT: vl1r.v v1, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-FDV-NEXT: csrr a0, vlenb -; CHECK-RV32-FDV-NEXT: slli a1, a0, 1 -; CHECK-RV32-FDV-NEXT: add a0, a1, a0 -; CHECK-RV32-FDV-NEXT: sub a0, s0, a0 -; CHECK-RV32-FDV-NEXT: addi a0, a0, -240 -; CHECK-RV32-FDV-NEXT: vl1r.v v2, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-FDV-NEXT: csrr a0, vlenb -; CHECK-RV32-FDV-NEXT: slli a0, a0, 2 -; CHECK-RV32-FDV-NEXT: sub a0, s0, a0 -; CHECK-RV32-FDV-NEXT: addi a0, a0, -240 -; CHECK-RV32-FDV-NEXT: vl1r.v v3, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-FDV-NEXT: csrr a0, vlenb -; CHECK-RV32-FDV-NEXT: slli a1, a0, 2 -; CHECK-RV32-FDV-NEXT: add a0, a1, a0 -; CHECK-RV32-FDV-NEXT: sub a0, s0, a0 -; CHECK-RV32-FDV-NEXT: addi a0, a0, -240 -; CHECK-RV32-FDV-NEXT: vl1r.v v4, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-FDV-NEXT: csrr a0, vlenb -; CHECK-RV32-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FDV-NEXT: mv a1, a0 -; CHECK-RV32-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FDV-NEXT: add a0, a0, a1 -; CHECK-RV32-FDV-NEXT: sub a0, s0, a0 -; CHECK-RV32-FDV-NEXT: addi a0, a0, -240 -; CHECK-RV32-FDV-NEXT: vl1r.v v5, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-FDV-NEXT: csrr a0, vlenb -; CHECK-RV32-FDV-NEXT: slli a1, a0, 3 -; CHECK-RV32-FDV-NEXT: sub a0, a1, a0 -; CHECK-RV32-FDV-NEXT: sub a0, s0, a0 -; CHECK-RV32-FDV-NEXT: addi a0, 
a0, -240 -; CHECK-RV32-FDV-NEXT: vl1r.v v6, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-FDV-NEXT: csrr a0, vlenb ; CHECK-RV32-FDV-NEXT: slli a0, a0, 3 ; CHECK-RV32-FDV-NEXT: sub a0, s0, a0 ; CHECK-RV32-FDV-NEXT: addi a0, a0, -240 -; CHECK-RV32-FDV-NEXT: vl1r.v v7, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-FDV-NEXT: csrr a0, vlenb -; CHECK-RV32-FDV-NEXT: slli a1, a0, 3 -; CHECK-RV32-FDV-NEXT: add a0, a1, a0 -; CHECK-RV32-FDV-NEXT: sub a0, s0, a0 -; CHECK-RV32-FDV-NEXT: addi a0, a0, -240 -; CHECK-RV32-FDV-NEXT: vl1r.v v8, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-FDV-NEXT: csrr a0, vlenb -; CHECK-RV32-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FDV-NEXT: mv a1, a0 -; CHECK-RV32-FDV-NEXT: slli a0, a0, 2 -; CHECK-RV32-FDV-NEXT: add a0, a0, a1 -; CHECK-RV32-FDV-NEXT: sub a0, s0, a0 -; CHECK-RV32-FDV-NEXT: addi a0, a0, -240 -; CHECK-RV32-FDV-NEXT: vl1r.v v9, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-FDV-NEXT: csrr a0, vlenb -; CHECK-RV32-FDV-NEXT: mv a1, a0 -; CHECK-RV32-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FDV-NEXT: add a1, a1, a0 -; CHECK-RV32-FDV-NEXT: slli a0, a0, 2 -; CHECK-RV32-FDV-NEXT: add a0, a0, a1 -; CHECK-RV32-FDV-NEXT: sub a0, s0, a0 -; CHECK-RV32-FDV-NEXT: addi a0, a0, -240 -; CHECK-RV32-FDV-NEXT: vl1r.v v10, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-FDV-NEXT: csrr a0, vlenb -; CHECK-RV32-FDV-NEXT: slli a0, a0, 2 -; CHECK-RV32-FDV-NEXT: mv a1, a0 -; CHECK-RV32-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FDV-NEXT: add a0, a0, a1 -; CHECK-RV32-FDV-NEXT: sub a0, s0, a0 -; CHECK-RV32-FDV-NEXT: addi a0, a0, -240 -; CHECK-RV32-FDV-NEXT: vl1r.v v11, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-FDV-NEXT: csrr a0, vlenb -; CHECK-RV32-FDV-NEXT: mv a1, a0 -; CHECK-RV32-FDV-NEXT: slli a0, a0, 2 -; CHECK-RV32-FDV-NEXT: add a1, a1, a0 -; CHECK-RV32-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FDV-NEXT: add a0, a0, a1 -; CHECK-RV32-FDV-NEXT: sub a0, s0, a0 -; CHECK-RV32-FDV-NEXT: addi a0, a0, -240 -; CHECK-RV32-FDV-NEXT: vl1r.v v12, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-FDV-NEXT: csrr a0, vlenb -; CHECK-RV32-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FDV-NEXT: mv a1, a0 -; CHECK-RV32-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FDV-NEXT: add a1, a1, a0 -; CHECK-RV32-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FDV-NEXT: add a0, a0, a1 -; CHECK-RV32-FDV-NEXT: sub a0, s0, a0 -; CHECK-RV32-FDV-NEXT: addi a0, a0, -240 -; CHECK-RV32-FDV-NEXT: vl1r.v v13, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-FDV-NEXT: csrr a0, vlenb -; CHECK-RV32-FDV-NEXT: slli a1, a0, 4 -; CHECK-RV32-FDV-NEXT: sub a0, a1, a0 -; CHECK-RV32-FDV-NEXT: sub a0, s0, a0 -; CHECK-RV32-FDV-NEXT: addi a0, a0, -240 -; CHECK-RV32-FDV-NEXT: vl1r.v v14, (a0) # vscale x 8-byte Folded Reload +; CHECK-RV32-FDV-NEXT: vl8r.v v0, (a0) # vscale x 64-byte Folded Reload ; CHECK-RV32-FDV-NEXT: csrr a0, vlenb ; CHECK-RV32-FDV-NEXT: slli a0, a0, 4 ; CHECK-RV32-FDV-NEXT: sub a0, s0, a0 ; CHECK-RV32-FDV-NEXT: addi a0, a0, -240 -; CHECK-RV32-FDV-NEXT: vl1r.v v15, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-FDV-NEXT: csrr a0, vlenb -; CHECK-RV32-FDV-NEXT: slli a1, a0, 4 -; CHECK-RV32-FDV-NEXT: add a0, a1, a0 -; CHECK-RV32-FDV-NEXT: sub a0, s0, a0 -; CHECK-RV32-FDV-NEXT: addi a0, a0, -240 -; CHECK-RV32-FDV-NEXT: vl1r.v v16, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-FDV-NEXT: csrr a0, vlenb -; CHECK-RV32-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FDV-NEXT: mv a1, a0 -; CHECK-RV32-FDV-NEXT: slli a0, a0, 3 -; CHECK-RV32-FDV-NEXT: add a0, a0, a1 -; CHECK-RV32-FDV-NEXT: sub a0, s0, a0 -; CHECK-RV32-FDV-NEXT: addi 
a0, a0, -240 -; CHECK-RV32-FDV-NEXT: vl1r.v v17, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-FDV-NEXT: csrr a0, vlenb -; CHECK-RV32-FDV-NEXT: mv a1, a0 -; CHECK-RV32-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FDV-NEXT: add a1, a1, a0 -; CHECK-RV32-FDV-NEXT: slli a0, a0, 3 -; CHECK-RV32-FDV-NEXT: add a0, a0, a1 -; CHECK-RV32-FDV-NEXT: sub a0, s0, a0 -; CHECK-RV32-FDV-NEXT: addi a0, a0, -240 -; CHECK-RV32-FDV-NEXT: vl1r.v v18, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-FDV-NEXT: csrr a0, vlenb -; CHECK-RV32-FDV-NEXT: slli a0, a0, 2 -; CHECK-RV32-FDV-NEXT: mv a1, a0 -; CHECK-RV32-FDV-NEXT: slli a0, a0, 2 -; CHECK-RV32-FDV-NEXT: add a0, a0, a1 -; CHECK-RV32-FDV-NEXT: sub a0, s0, a0 -; CHECK-RV32-FDV-NEXT: addi a0, a0, -240 -; CHECK-RV32-FDV-NEXT: vl1r.v v19, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-FDV-NEXT: csrr a0, vlenb -; CHECK-RV32-FDV-NEXT: mv a1, a0 -; CHECK-RV32-FDV-NEXT: slli a0, a0, 2 -; CHECK-RV32-FDV-NEXT: add a1, a1, a0 -; CHECK-RV32-FDV-NEXT: slli a0, a0, 2 -; CHECK-RV32-FDV-NEXT: add a0, a0, a1 -; CHECK-RV32-FDV-NEXT: sub a0, s0, a0 -; CHECK-RV32-FDV-NEXT: addi a0, a0, -240 -; CHECK-RV32-FDV-NEXT: vl1r.v v20, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-FDV-NEXT: csrr a0, vlenb -; CHECK-RV32-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FDV-NEXT: mv a1, a0 -; CHECK-RV32-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FDV-NEXT: add a1, a1, a0 -; CHECK-RV32-FDV-NEXT: slli a0, a0, 2 -; CHECK-RV32-FDV-NEXT: add a0, a0, a1 -; CHECK-RV32-FDV-NEXT: sub a0, s0, a0 -; CHECK-RV32-FDV-NEXT: addi a0, a0, -240 -; CHECK-RV32-FDV-NEXT: vl1r.v v21, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-FDV-NEXT: csrr a0, vlenb -; CHECK-RV32-FDV-NEXT: mv a1, a0 -; CHECK-RV32-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FDV-NEXT: add a1, a1, a0 -; CHECK-RV32-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FDV-NEXT: add a1, a1, a0 -; CHECK-RV32-FDV-NEXT: slli a0, a0, 2 -; CHECK-RV32-FDV-NEXT: add a0, a0, a1 -; CHECK-RV32-FDV-NEXT: sub a0, s0, a0 -; CHECK-RV32-FDV-NEXT: addi a0, a0, -240 -; CHECK-RV32-FDV-NEXT: vl1r.v v22, (a0) # vscale x 8-byte Folded Reload +; CHECK-RV32-FDV-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload ; CHECK-RV32-FDV-NEXT: csrr a0, vlenb ; CHECK-RV32-FDV-NEXT: slli a0, a0, 3 ; CHECK-RV32-FDV-NEXT: mv a1, a0 @@ -7241,81 +3242,12 @@ define void @foo_fp_with_call() #2 { ; CHECK-RV32-FDV-NEXT: add a0, a0, a1 ; CHECK-RV32-FDV-NEXT: sub a0, s0, a0 ; CHECK-RV32-FDV-NEXT: addi a0, a0, -240 -; CHECK-RV32-FDV-NEXT: vl1r.v v23, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-FDV-NEXT: csrr a0, vlenb -; CHECK-RV32-FDV-NEXT: mv a1, a0 -; CHECK-RV32-FDV-NEXT: slli a0, a0, 3 -; CHECK-RV32-FDV-NEXT: add a1, a1, a0 -; CHECK-RV32-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FDV-NEXT: add a0, a0, a1 -; CHECK-RV32-FDV-NEXT: sub a0, s0, a0 -; CHECK-RV32-FDV-NEXT: addi a0, a0, -240 -; CHECK-RV32-FDV-NEXT: vl1r.v v24, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-FDV-NEXT: csrr a0, vlenb -; CHECK-RV32-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FDV-NEXT: mv a1, a0 -; CHECK-RV32-FDV-NEXT: slli a0, a0, 2 -; CHECK-RV32-FDV-NEXT: add a1, a1, a0 -; CHECK-RV32-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FDV-NEXT: add a0, a0, a1 -; CHECK-RV32-FDV-NEXT: sub a0, s0, a0 -; CHECK-RV32-FDV-NEXT: addi a0, a0, -240 -; CHECK-RV32-FDV-NEXT: vl1r.v v25, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-FDV-NEXT: csrr a0, vlenb -; CHECK-RV32-FDV-NEXT: mv a1, a0 -; CHECK-RV32-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FDV-NEXT: add a1, a1, a0 -; CHECK-RV32-FDV-NEXT: slli a0, a0, 2 -; CHECK-RV32-FDV-NEXT: add a1, a1, a0 -; 
CHECK-RV32-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FDV-NEXT: add a0, a0, a1 -; CHECK-RV32-FDV-NEXT: sub a0, s0, a0 -; CHECK-RV32-FDV-NEXT: addi a0, a0, -240 -; CHECK-RV32-FDV-NEXT: vl1r.v v26, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-FDV-NEXT: csrr a0, vlenb -; CHECK-RV32-FDV-NEXT: slli a0, a0, 2 -; CHECK-RV32-FDV-NEXT: mv a1, a0 -; CHECK-RV32-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FDV-NEXT: add a1, a1, a0 -; CHECK-RV32-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FDV-NEXT: add a0, a0, a1 -; CHECK-RV32-FDV-NEXT: sub a0, s0, a0 -; CHECK-RV32-FDV-NEXT: addi a0, a0, -240 -; CHECK-RV32-FDV-NEXT: vl1r.v v27, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-FDV-NEXT: csrr a0, vlenb -; CHECK-RV32-FDV-NEXT: mv a1, a0 -; CHECK-RV32-FDV-NEXT: slli a0, a0, 2 -; CHECK-RV32-FDV-NEXT: add a1, a1, a0 -; CHECK-RV32-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FDV-NEXT: add a1, a1, a0 -; CHECK-RV32-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FDV-NEXT: add a0, a0, a1 -; CHECK-RV32-FDV-NEXT: sub a0, s0, a0 -; CHECK-RV32-FDV-NEXT: addi a0, a0, -240 -; CHECK-RV32-FDV-NEXT: vl1r.v v28, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-FDV-NEXT: csrr a0, vlenb -; CHECK-RV32-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FDV-NEXT: mv a1, a0 -; CHECK-RV32-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FDV-NEXT: add a1, a1, a0 -; CHECK-RV32-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FDV-NEXT: add a1, a1, a0 -; CHECK-RV32-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV32-FDV-NEXT: add a0, a0, a1 -; CHECK-RV32-FDV-NEXT: sub a0, s0, a0 -; CHECK-RV32-FDV-NEXT: addi a0, a0, -240 -; CHECK-RV32-FDV-NEXT: vl1r.v v29, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-FDV-NEXT: csrr a0, vlenb -; CHECK-RV32-FDV-NEXT: slli a1, a0, 5 -; CHECK-RV32-FDV-NEXT: sub a0, a1, a0 -; CHECK-RV32-FDV-NEXT: sub a0, s0, a0 -; CHECK-RV32-FDV-NEXT: addi a0, a0, -240 -; CHECK-RV32-FDV-NEXT: vl1r.v v30, (a0) # vscale x 8-byte Folded Reload +; CHECK-RV32-FDV-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload ; CHECK-RV32-FDV-NEXT: csrr a0, vlenb ; CHECK-RV32-FDV-NEXT: slli a0, a0, 5 ; CHECK-RV32-FDV-NEXT: sub a0, s0, a0 ; CHECK-RV32-FDV-NEXT: addi a0, a0, -240 -; CHECK-RV32-FDV-NEXT: vl1r.v v31, (a0) # vscale x 8-byte Folded Reload +; CHECK-RV32-FDV-NEXT: vl8r.v v24, (a0) # vscale x 64-byte Folded Reload ; CHECK-RV32-FDV-NEXT: addi sp, s0, -240 ; CHECK-RV32-FDV-NEXT: lw ra, 236(sp) # 4-byte Folded Reload ; CHECK-RV32-FDV-NEXT: lw t0, 232(sp) # 4-byte Folded Reload @@ -8186,422 +4118,39 @@ define void @foo_fp_with_call() #2 { ; CHECK-RV64-V-NEXT: slli a0, a0, 5 ; CHECK-RV64-V-NEXT: sub sp, sp, a0 ; CHECK-RV64-V-NEXT: csrr a0, vlenb -; CHECK-RV64-V-NEXT: sub a0, s0, a0 -; CHECK-RV64-V-NEXT: addi a0, a0, -160 -; CHECK-RV64-V-NEXT: vs1r.v v0, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV64-V-NEXT: csrr a0, vlenb -; CHECK-RV64-V-NEXT: slli a0, a0, 1 -; CHECK-RV64-V-NEXT: sub a0, s0, a0 -; CHECK-RV64-V-NEXT: addi a0, a0, -160 -; CHECK-RV64-V-NEXT: vs1r.v v1, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV64-V-NEXT: csrr a0, vlenb -; CHECK-RV64-V-NEXT: slli a1, a0, 1 -; CHECK-RV64-V-NEXT: add a0, a1, a0 -; CHECK-RV64-V-NEXT: sub a0, s0, a0 -; CHECK-RV64-V-NEXT: addi a0, a0, -160 -; CHECK-RV64-V-NEXT: vs1r.v v2, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV64-V-NEXT: csrr a0, vlenb -; CHECK-RV64-V-NEXT: slli a0, a0, 2 -; CHECK-RV64-V-NEXT: sub a0, s0, a0 -; CHECK-RV64-V-NEXT: addi a0, a0, -160 -; CHECK-RV64-V-NEXT: vs1r.v v3, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV64-V-NEXT: csrr a0, vlenb -; CHECK-RV64-V-NEXT: slli a1, a0, 2 -; CHECK-RV64-V-NEXT: add a0, a1, a0 -; 
CHECK-RV64-V-NEXT: sub a0, s0, a0 -; CHECK-RV64-V-NEXT: addi a0, a0, -160 -; CHECK-RV64-V-NEXT: vs1r.v v4, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV64-V-NEXT: csrr a0, vlenb -; CHECK-RV64-V-NEXT: slli a0, a0, 1 -; CHECK-RV64-V-NEXT: mv a1, a0 -; CHECK-RV64-V-NEXT: slli a0, a0, 1 -; CHECK-RV64-V-NEXT: add a0, a0, a1 -; CHECK-RV64-V-NEXT: sub a0, s0, a0 -; CHECK-RV64-V-NEXT: addi a0, a0, -160 -; CHECK-RV64-V-NEXT: vs1r.v v5, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV64-V-NEXT: csrr a0, vlenb -; CHECK-RV64-V-NEXT: slli a1, a0, 3 -; CHECK-RV64-V-NEXT: sub a0, a1, a0 -; CHECK-RV64-V-NEXT: sub a0, s0, a0 -; CHECK-RV64-V-NEXT: addi a0, a0, -160 -; CHECK-RV64-V-NEXT: vs1r.v v6, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV64-V-NEXT: csrr a0, vlenb ; CHECK-RV64-V-NEXT: slli a0, a0, 3 ; CHECK-RV64-V-NEXT: sub a0, s0, a0 ; CHECK-RV64-V-NEXT: addi a0, a0, -160 -; CHECK-RV64-V-NEXT: vs1r.v v7, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV64-V-NEXT: csrr a0, vlenb -; CHECK-RV64-V-NEXT: slli a1, a0, 3 -; CHECK-RV64-V-NEXT: add a0, a1, a0 -; CHECK-RV64-V-NEXT: sub a0, s0, a0 -; CHECK-RV64-V-NEXT: addi a0, a0, -160 -; CHECK-RV64-V-NEXT: vs1r.v v8, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV64-V-NEXT: csrr a0, vlenb -; CHECK-RV64-V-NEXT: slli a0, a0, 1 -; CHECK-RV64-V-NEXT: mv a1, a0 -; CHECK-RV64-V-NEXT: slli a0, a0, 2 -; CHECK-RV64-V-NEXT: add a0, a0, a1 -; CHECK-RV64-V-NEXT: sub a0, s0, a0 -; CHECK-RV64-V-NEXT: addi a0, a0, -160 -; CHECK-RV64-V-NEXT: vs1r.v v9, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV64-V-NEXT: csrr a0, vlenb -; CHECK-RV64-V-NEXT: mv a1, a0 -; CHECK-RV64-V-NEXT: slli a0, a0, 1 -; CHECK-RV64-V-NEXT: add a1, a1, a0 -; CHECK-RV64-V-NEXT: slli a0, a0, 2 -; CHECK-RV64-V-NEXT: add a0, a0, a1 -; CHECK-RV64-V-NEXT: sub a0, s0, a0 -; CHECK-RV64-V-NEXT: addi a0, a0, -160 -; CHECK-RV64-V-NEXT: vs1r.v v10, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV64-V-NEXT: csrr a0, vlenb -; CHECK-RV64-V-NEXT: slli a0, a0, 2 -; CHECK-RV64-V-NEXT: mv a1, a0 -; CHECK-RV64-V-NEXT: slli a0, a0, 1 -; CHECK-RV64-V-NEXT: add a0, a0, a1 -; CHECK-RV64-V-NEXT: sub a0, s0, a0 -; CHECK-RV64-V-NEXT: addi a0, a0, -160 -; CHECK-RV64-V-NEXT: vs1r.v v11, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV64-V-NEXT: csrr a0, vlenb -; CHECK-RV64-V-NEXT: mv a1, a0 -; CHECK-RV64-V-NEXT: slli a0, a0, 2 -; CHECK-RV64-V-NEXT: add a1, a1, a0 -; CHECK-RV64-V-NEXT: slli a0, a0, 1 -; CHECK-RV64-V-NEXT: add a0, a0, a1 -; CHECK-RV64-V-NEXT: sub a0, s0, a0 -; CHECK-RV64-V-NEXT: addi a0, a0, -160 -; CHECK-RV64-V-NEXT: vs1r.v v12, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV64-V-NEXT: csrr a0, vlenb -; CHECK-RV64-V-NEXT: slli a0, a0, 1 -; CHECK-RV64-V-NEXT: mv a1, a0 -; CHECK-RV64-V-NEXT: slli a0, a0, 1 -; CHECK-RV64-V-NEXT: add a1, a1, a0 -; CHECK-RV64-V-NEXT: slli a0, a0, 1 -; CHECK-RV64-V-NEXT: add a0, a0, a1 -; CHECK-RV64-V-NEXT: sub a0, s0, a0 -; CHECK-RV64-V-NEXT: addi a0, a0, -160 -; CHECK-RV64-V-NEXT: vs1r.v v13, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV64-V-NEXT: csrr a0, vlenb -; CHECK-RV64-V-NEXT: slli a1, a0, 4 -; CHECK-RV64-V-NEXT: sub a0, a1, a0 -; CHECK-RV64-V-NEXT: sub a0, s0, a0 -; CHECK-RV64-V-NEXT: addi a0, a0, -160 -; CHECK-RV64-V-NEXT: vs1r.v v14, (a0) # vscale x 8-byte Folded Spill +; CHECK-RV64-V-NEXT: vs8r.v v0, (a0) # vscale x 64-byte Folded Spill ; CHECK-RV64-V-NEXT: csrr a0, vlenb ; CHECK-RV64-V-NEXT: slli a0, a0, 4 ; CHECK-RV64-V-NEXT: sub a0, s0, a0 ; CHECK-RV64-V-NEXT: addi a0, a0, -160 -; CHECK-RV64-V-NEXT: vs1r.v v15, (a0) # vscale x 8-byte Folded Spill +; CHECK-RV64-V-NEXT: 
vs8r.v v8, (a0) # vscale x 64-byte Folded Spill ; CHECK-RV64-V-NEXT: csrr a0, vlenb -; CHECK-RV64-V-NEXT: slli a1, a0, 4 -; CHECK-RV64-V-NEXT: add a0, a1, a0 -; CHECK-RV64-V-NEXT: sub a0, s0, a0 -; CHECK-RV64-V-NEXT: addi a0, a0, -160 -; CHECK-RV64-V-NEXT: vs1r.v v16, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV64-V-NEXT: csrr a0, vlenb -; CHECK-RV64-V-NEXT: slli a0, a0, 1 -; CHECK-RV64-V-NEXT: mv a1, a0 ; CHECK-RV64-V-NEXT: slli a0, a0, 3 -; CHECK-RV64-V-NEXT: add a0, a0, a1 -; CHECK-RV64-V-NEXT: sub a0, s0, a0 -; CHECK-RV64-V-NEXT: addi a0, a0, -160 -; CHECK-RV64-V-NEXT: vs1r.v v17, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV64-V-NEXT: csrr a0, vlenb -; CHECK-RV64-V-NEXT: mv a1, a0 -; CHECK-RV64-V-NEXT: slli a0, a0, 1 -; CHECK-RV64-V-NEXT: add a1, a1, a0 -; CHECK-RV64-V-NEXT: slli a0, a0, 3 -; CHECK-RV64-V-NEXT: add a0, a0, a1 -; CHECK-RV64-V-NEXT: sub a0, s0, a0 -; CHECK-RV64-V-NEXT: addi a0, a0, -160 -; CHECK-RV64-V-NEXT: vs1r.v v18, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV64-V-NEXT: csrr a0, vlenb -; CHECK-RV64-V-NEXT: slli a0, a0, 2 -; CHECK-RV64-V-NEXT: mv a1, a0 -; CHECK-RV64-V-NEXT: slli a0, a0, 2 -; CHECK-RV64-V-NEXT: add a0, a0, a1 -; CHECK-RV64-V-NEXT: sub a0, s0, a0 -; CHECK-RV64-V-NEXT: addi a0, a0, -160 -; CHECK-RV64-V-NEXT: vs1r.v v19, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV64-V-NEXT: csrr a0, vlenb -; CHECK-RV64-V-NEXT: mv a1, a0 -; CHECK-RV64-V-NEXT: slli a0, a0, 2 -; CHECK-RV64-V-NEXT: add a1, a1, a0 -; CHECK-RV64-V-NEXT: slli a0, a0, 2 -; CHECK-RV64-V-NEXT: add a0, a0, a1 -; CHECK-RV64-V-NEXT: sub a0, s0, a0 -; CHECK-RV64-V-NEXT: addi a0, a0, -160 -; CHECK-RV64-V-NEXT: vs1r.v v20, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV64-V-NEXT: csrr a0, vlenb -; CHECK-RV64-V-NEXT: slli a0, a0, 1 -; CHECK-RV64-V-NEXT: mv a1, a0 -; CHECK-RV64-V-NEXT: slli a0, a0, 1 -; CHECK-RV64-V-NEXT: add a1, a1, a0 -; CHECK-RV64-V-NEXT: slli a0, a0, 2 -; CHECK-RV64-V-NEXT: add a0, a0, a1 -; CHECK-RV64-V-NEXT: sub a0, s0, a0 -; CHECK-RV64-V-NEXT: addi a0, a0, -160 -; CHECK-RV64-V-NEXT: vs1r.v v21, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV64-V-NEXT: csrr a0, vlenb -; CHECK-RV64-V-NEXT: mv a1, a0 -; CHECK-RV64-V-NEXT: slli a0, a0, 1 -; CHECK-RV64-V-NEXT: add a1, a1, a0 -; CHECK-RV64-V-NEXT: slli a0, a0, 1 -; CHECK-RV64-V-NEXT: add a1, a1, a0 -; CHECK-RV64-V-NEXT: slli a0, a0, 2 -; CHECK-RV64-V-NEXT: add a0, a0, a1 -; CHECK-RV64-V-NEXT: sub a0, s0, a0 -; CHECK-RV64-V-NEXT: addi a0, a0, -160 -; CHECK-RV64-V-NEXT: vs1r.v v22, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV64-V-NEXT: csrr a0, vlenb -; CHECK-RV64-V-NEXT: slli a0, a0, 3 -; CHECK-RV64-V-NEXT: mv a1, a0 -; CHECK-RV64-V-NEXT: slli a0, a0, 1 -; CHECK-RV64-V-NEXT: add a0, a0, a1 -; CHECK-RV64-V-NEXT: sub a0, s0, a0 -; CHECK-RV64-V-NEXT: addi a0, a0, -160 -; CHECK-RV64-V-NEXT: vs1r.v v23, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV64-V-NEXT: csrr a0, vlenb -; CHECK-RV64-V-NEXT: mv a1, a0 -; CHECK-RV64-V-NEXT: slli a0, a0, 3 -; CHECK-RV64-V-NEXT: add a1, a1, a0 -; CHECK-RV64-V-NEXT: slli a0, a0, 1 -; CHECK-RV64-V-NEXT: add a0, a0, a1 -; CHECK-RV64-V-NEXT: sub a0, s0, a0 -; CHECK-RV64-V-NEXT: addi a0, a0, -160 -; CHECK-RV64-V-NEXT: vs1r.v v24, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV64-V-NEXT: csrr a0, vlenb -; CHECK-RV64-V-NEXT: slli a0, a0, 1 ; CHECK-RV64-V-NEXT: mv a1, a0 -; CHECK-RV64-V-NEXT: slli a0, a0, 2 -; CHECK-RV64-V-NEXT: add a1, a1, a0 ; CHECK-RV64-V-NEXT: slli a0, a0, 1 ; CHECK-RV64-V-NEXT: add a0, a0, a1 ; CHECK-RV64-V-NEXT: sub a0, s0, a0 ; CHECK-RV64-V-NEXT: addi a0, a0, -160 -; 
CHECK-RV64-V-NEXT: vs1r.v v25, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV64-V-NEXT: csrr a0, vlenb -; CHECK-RV64-V-NEXT: mv a1, a0 -; CHECK-RV64-V-NEXT: slli a0, a0, 1 -; CHECK-RV64-V-NEXT: add a1, a1, a0 -; CHECK-RV64-V-NEXT: slli a0, a0, 2 -; CHECK-RV64-V-NEXT: add a1, a1, a0 -; CHECK-RV64-V-NEXT: slli a0, a0, 1 -; CHECK-RV64-V-NEXT: add a0, a0, a1 -; CHECK-RV64-V-NEXT: sub a0, s0, a0 -; CHECK-RV64-V-NEXT: addi a0, a0, -160 -; CHECK-RV64-V-NEXT: vs1r.v v26, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV64-V-NEXT: csrr a0, vlenb -; CHECK-RV64-V-NEXT: slli a0, a0, 2 -; CHECK-RV64-V-NEXT: mv a1, a0 -; CHECK-RV64-V-NEXT: slli a0, a0, 1 -; CHECK-RV64-V-NEXT: add a1, a1, a0 -; CHECK-RV64-V-NEXT: slli a0, a0, 1 -; CHECK-RV64-V-NEXT: add a0, a0, a1 -; CHECK-RV64-V-NEXT: sub a0, s0, a0 -; CHECK-RV64-V-NEXT: addi a0, a0, -160 -; CHECK-RV64-V-NEXT: vs1r.v v27, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV64-V-NEXT: csrr a0, vlenb -; CHECK-RV64-V-NEXT: mv a1, a0 -; CHECK-RV64-V-NEXT: slli a0, a0, 2 -; CHECK-RV64-V-NEXT: add a1, a1, a0 -; CHECK-RV64-V-NEXT: slli a0, a0, 1 -; CHECK-RV64-V-NEXT: add a1, a1, a0 -; CHECK-RV64-V-NEXT: slli a0, a0, 1 -; CHECK-RV64-V-NEXT: add a0, a0, a1 -; CHECK-RV64-V-NEXT: sub a0, s0, a0 -; CHECK-RV64-V-NEXT: addi a0, a0, -160 -; CHECK-RV64-V-NEXT: vs1r.v v28, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV64-V-NEXT: csrr a0, vlenb -; CHECK-RV64-V-NEXT: slli a0, a0, 1 -; CHECK-RV64-V-NEXT: mv a1, a0 -; CHECK-RV64-V-NEXT: slli a0, a0, 1 -; CHECK-RV64-V-NEXT: add a1, a1, a0 -; CHECK-RV64-V-NEXT: slli a0, a0, 1 -; CHECK-RV64-V-NEXT: add a1, a1, a0 -; CHECK-RV64-V-NEXT: slli a0, a0, 1 -; CHECK-RV64-V-NEXT: add a0, a0, a1 -; CHECK-RV64-V-NEXT: sub a0, s0, a0 -; CHECK-RV64-V-NEXT: addi a0, a0, -160 -; CHECK-RV64-V-NEXT: vs1r.v v29, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV64-V-NEXT: csrr a0, vlenb -; CHECK-RV64-V-NEXT: slli a1, a0, 5 -; CHECK-RV64-V-NEXT: sub a0, a1, a0 -; CHECK-RV64-V-NEXT: sub a0, s0, a0 -; CHECK-RV64-V-NEXT: addi a0, a0, -160 -; CHECK-RV64-V-NEXT: vs1r.v v30, (a0) # vscale x 8-byte Folded Spill +; CHECK-RV64-V-NEXT: vs8r.v v16, (a0) # vscale x 64-byte Folded Spill ; CHECK-RV64-V-NEXT: csrr a0, vlenb ; CHECK-RV64-V-NEXT: slli a0, a0, 5 ; CHECK-RV64-V-NEXT: sub a0, s0, a0 ; CHECK-RV64-V-NEXT: addi a0, a0, -160 -; CHECK-RV64-V-NEXT: vs1r.v v31, (a0) # vscale x 8-byte Folded Spill +; CHECK-RV64-V-NEXT: vs8r.v v24, (a0) # vscale x 64-byte Folded Spill ; CHECK-RV64-V-NEXT: call otherfoo ; CHECK-RV64-V-NEXT: csrr a0, vlenb -; CHECK-RV64-V-NEXT: sub a0, s0, a0 -; CHECK-RV64-V-NEXT: addi a0, a0, -160 -; CHECK-RV64-V-NEXT: vl1r.v v0, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV64-V-NEXT: csrr a0, vlenb -; CHECK-RV64-V-NEXT: slli a0, a0, 1 -; CHECK-RV64-V-NEXT: sub a0, s0, a0 -; CHECK-RV64-V-NEXT: addi a0, a0, -160 -; CHECK-RV64-V-NEXT: vl1r.v v1, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV64-V-NEXT: csrr a0, vlenb -; CHECK-RV64-V-NEXT: slli a1, a0, 1 -; CHECK-RV64-V-NEXT: add a0, a1, a0 -; CHECK-RV64-V-NEXT: sub a0, s0, a0 -; CHECK-RV64-V-NEXT: addi a0, a0, -160 -; CHECK-RV64-V-NEXT: vl1r.v v2, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV64-V-NEXT: csrr a0, vlenb -; CHECK-RV64-V-NEXT: slli a0, a0, 2 -; CHECK-RV64-V-NEXT: sub a0, s0, a0 -; CHECK-RV64-V-NEXT: addi a0, a0, -160 -; CHECK-RV64-V-NEXT: vl1r.v v3, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV64-V-NEXT: csrr a0, vlenb -; CHECK-RV64-V-NEXT: slli a1, a0, 2 -; CHECK-RV64-V-NEXT: add a0, a1, a0 -; CHECK-RV64-V-NEXT: sub a0, s0, a0 -; CHECK-RV64-V-NEXT: addi a0, a0, -160 -; 
CHECK-RV64-V-NEXT: vl1r.v v4, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV64-V-NEXT: csrr a0, vlenb -; CHECK-RV64-V-NEXT: slli a0, a0, 1 -; CHECK-RV64-V-NEXT: mv a1, a0 -; CHECK-RV64-V-NEXT: slli a0, a0, 1 -; CHECK-RV64-V-NEXT: add a0, a0, a1 -; CHECK-RV64-V-NEXT: sub a0, s0, a0 -; CHECK-RV64-V-NEXT: addi a0, a0, -160 -; CHECK-RV64-V-NEXT: vl1r.v v5, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV64-V-NEXT: csrr a0, vlenb -; CHECK-RV64-V-NEXT: slli a1, a0, 3 -; CHECK-RV64-V-NEXT: sub a0, a1, a0 -; CHECK-RV64-V-NEXT: sub a0, s0, a0 -; CHECK-RV64-V-NEXT: addi a0, a0, -160 -; CHECK-RV64-V-NEXT: vl1r.v v6, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV64-V-NEXT: csrr a0, vlenb ; CHECK-RV64-V-NEXT: slli a0, a0, 3 ; CHECK-RV64-V-NEXT: sub a0, s0, a0 ; CHECK-RV64-V-NEXT: addi a0, a0, -160 -; CHECK-RV64-V-NEXT: vl1r.v v7, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV64-V-NEXT: csrr a0, vlenb -; CHECK-RV64-V-NEXT: slli a1, a0, 3 -; CHECK-RV64-V-NEXT: add a0, a1, a0 -; CHECK-RV64-V-NEXT: sub a0, s0, a0 -; CHECK-RV64-V-NEXT: addi a0, a0, -160 -; CHECK-RV64-V-NEXT: vl1r.v v8, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV64-V-NEXT: csrr a0, vlenb -; CHECK-RV64-V-NEXT: slli a0, a0, 1 -; CHECK-RV64-V-NEXT: mv a1, a0 -; CHECK-RV64-V-NEXT: slli a0, a0, 2 -; CHECK-RV64-V-NEXT: add a0, a0, a1 -; CHECK-RV64-V-NEXT: sub a0, s0, a0 -; CHECK-RV64-V-NEXT: addi a0, a0, -160 -; CHECK-RV64-V-NEXT: vl1r.v v9, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV64-V-NEXT: csrr a0, vlenb -; CHECK-RV64-V-NEXT: mv a1, a0 -; CHECK-RV64-V-NEXT: slli a0, a0, 1 -; CHECK-RV64-V-NEXT: add a1, a1, a0 -; CHECK-RV64-V-NEXT: slli a0, a0, 2 -; CHECK-RV64-V-NEXT: add a0, a0, a1 -; CHECK-RV64-V-NEXT: sub a0, s0, a0 -; CHECK-RV64-V-NEXT: addi a0, a0, -160 -; CHECK-RV64-V-NEXT: vl1r.v v10, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV64-V-NEXT: csrr a0, vlenb -; CHECK-RV64-V-NEXT: slli a0, a0, 2 -; CHECK-RV64-V-NEXT: mv a1, a0 -; CHECK-RV64-V-NEXT: slli a0, a0, 1 -; CHECK-RV64-V-NEXT: add a0, a0, a1 -; CHECK-RV64-V-NEXT: sub a0, s0, a0 -; CHECK-RV64-V-NEXT: addi a0, a0, -160 -; CHECK-RV64-V-NEXT: vl1r.v v11, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV64-V-NEXT: csrr a0, vlenb -; CHECK-RV64-V-NEXT: mv a1, a0 -; CHECK-RV64-V-NEXT: slli a0, a0, 2 -; CHECK-RV64-V-NEXT: add a1, a1, a0 -; CHECK-RV64-V-NEXT: slli a0, a0, 1 -; CHECK-RV64-V-NEXT: add a0, a0, a1 -; CHECK-RV64-V-NEXT: sub a0, s0, a0 -; CHECK-RV64-V-NEXT: addi a0, a0, -160 -; CHECK-RV64-V-NEXT: vl1r.v v12, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV64-V-NEXT: csrr a0, vlenb -; CHECK-RV64-V-NEXT: slli a0, a0, 1 -; CHECK-RV64-V-NEXT: mv a1, a0 -; CHECK-RV64-V-NEXT: slli a0, a0, 1 -; CHECK-RV64-V-NEXT: add a1, a1, a0 -; CHECK-RV64-V-NEXT: slli a0, a0, 1 -; CHECK-RV64-V-NEXT: add a0, a0, a1 -; CHECK-RV64-V-NEXT: sub a0, s0, a0 -; CHECK-RV64-V-NEXT: addi a0, a0, -160 -; CHECK-RV64-V-NEXT: vl1r.v v13, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV64-V-NEXT: csrr a0, vlenb -; CHECK-RV64-V-NEXT: slli a1, a0, 4 -; CHECK-RV64-V-NEXT: sub a0, a1, a0 -; CHECK-RV64-V-NEXT: sub a0, s0, a0 -; CHECK-RV64-V-NEXT: addi a0, a0, -160 -; CHECK-RV64-V-NEXT: vl1r.v v14, (a0) # vscale x 8-byte Folded Reload +; CHECK-RV64-V-NEXT: vl8r.v v0, (a0) # vscale x 64-byte Folded Reload ; CHECK-RV64-V-NEXT: csrr a0, vlenb ; CHECK-RV64-V-NEXT: slli a0, a0, 4 ; CHECK-RV64-V-NEXT: sub a0, s0, a0 ; CHECK-RV64-V-NEXT: addi a0, a0, -160 -; CHECK-RV64-V-NEXT: vl1r.v v15, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV64-V-NEXT: csrr a0, vlenb -; CHECK-RV64-V-NEXT: slli a1, a0, 4 -; 
CHECK-RV64-V-NEXT: add a0, a1, a0 -; CHECK-RV64-V-NEXT: sub a0, s0, a0 -; CHECK-RV64-V-NEXT: addi a0, a0, -160 -; CHECK-RV64-V-NEXT: vl1r.v v16, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV64-V-NEXT: csrr a0, vlenb -; CHECK-RV64-V-NEXT: slli a0, a0, 1 -; CHECK-RV64-V-NEXT: mv a1, a0 -; CHECK-RV64-V-NEXT: slli a0, a0, 3 -; CHECK-RV64-V-NEXT: add a0, a0, a1 -; CHECK-RV64-V-NEXT: sub a0, s0, a0 -; CHECK-RV64-V-NEXT: addi a0, a0, -160 -; CHECK-RV64-V-NEXT: vl1r.v v17, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV64-V-NEXT: csrr a0, vlenb -; CHECK-RV64-V-NEXT: mv a1, a0 -; CHECK-RV64-V-NEXT: slli a0, a0, 1 -; CHECK-RV64-V-NEXT: add a1, a1, a0 -; CHECK-RV64-V-NEXT: slli a0, a0, 3 -; CHECK-RV64-V-NEXT: add a0, a0, a1 -; CHECK-RV64-V-NEXT: sub a0, s0, a0 -; CHECK-RV64-V-NEXT: addi a0, a0, -160 -; CHECK-RV64-V-NEXT: vl1r.v v18, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV64-V-NEXT: csrr a0, vlenb -; CHECK-RV64-V-NEXT: slli a0, a0, 2 -; CHECK-RV64-V-NEXT: mv a1, a0 -; CHECK-RV64-V-NEXT: slli a0, a0, 2 -; CHECK-RV64-V-NEXT: add a0, a0, a1 -; CHECK-RV64-V-NEXT: sub a0, s0, a0 -; CHECK-RV64-V-NEXT: addi a0, a0, -160 -; CHECK-RV64-V-NEXT: vl1r.v v19, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV64-V-NEXT: csrr a0, vlenb -; CHECK-RV64-V-NEXT: mv a1, a0 -; CHECK-RV64-V-NEXT: slli a0, a0, 2 -; CHECK-RV64-V-NEXT: add a1, a1, a0 -; CHECK-RV64-V-NEXT: slli a0, a0, 2 -; CHECK-RV64-V-NEXT: add a0, a0, a1 -; CHECK-RV64-V-NEXT: sub a0, s0, a0 -; CHECK-RV64-V-NEXT: addi a0, a0, -160 -; CHECK-RV64-V-NEXT: vl1r.v v20, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV64-V-NEXT: csrr a0, vlenb -; CHECK-RV64-V-NEXT: slli a0, a0, 1 -; CHECK-RV64-V-NEXT: mv a1, a0 -; CHECK-RV64-V-NEXT: slli a0, a0, 1 -; CHECK-RV64-V-NEXT: add a1, a1, a0 -; CHECK-RV64-V-NEXT: slli a0, a0, 2 -; CHECK-RV64-V-NEXT: add a0, a0, a1 -; CHECK-RV64-V-NEXT: sub a0, s0, a0 -; CHECK-RV64-V-NEXT: addi a0, a0, -160 -; CHECK-RV64-V-NEXT: vl1r.v v21, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV64-V-NEXT: csrr a0, vlenb -; CHECK-RV64-V-NEXT: mv a1, a0 -; CHECK-RV64-V-NEXT: slli a0, a0, 1 -; CHECK-RV64-V-NEXT: add a1, a1, a0 -; CHECK-RV64-V-NEXT: slli a0, a0, 1 -; CHECK-RV64-V-NEXT: add a1, a1, a0 -; CHECK-RV64-V-NEXT: slli a0, a0, 2 -; CHECK-RV64-V-NEXT: add a0, a0, a1 -; CHECK-RV64-V-NEXT: sub a0, s0, a0 -; CHECK-RV64-V-NEXT: addi a0, a0, -160 -; CHECK-RV64-V-NEXT: vl1r.v v22, (a0) # vscale x 8-byte Folded Reload +; CHECK-RV64-V-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload ; CHECK-RV64-V-NEXT: csrr a0, vlenb ; CHECK-RV64-V-NEXT: slli a0, a0, 3 ; CHECK-RV64-V-NEXT: mv a1, a0 @@ -8609,81 +4158,12 @@ define void @foo_fp_with_call() #2 { ; CHECK-RV64-V-NEXT: add a0, a0, a1 ; CHECK-RV64-V-NEXT: sub a0, s0, a0 ; CHECK-RV64-V-NEXT: addi a0, a0, -160 -; CHECK-RV64-V-NEXT: vl1r.v v23, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV64-V-NEXT: csrr a0, vlenb -; CHECK-RV64-V-NEXT: mv a1, a0 -; CHECK-RV64-V-NEXT: slli a0, a0, 3 -; CHECK-RV64-V-NEXT: add a1, a1, a0 -; CHECK-RV64-V-NEXT: slli a0, a0, 1 -; CHECK-RV64-V-NEXT: add a0, a0, a1 -; CHECK-RV64-V-NEXT: sub a0, s0, a0 -; CHECK-RV64-V-NEXT: addi a0, a0, -160 -; CHECK-RV64-V-NEXT: vl1r.v v24, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV64-V-NEXT: csrr a0, vlenb -; CHECK-RV64-V-NEXT: slli a0, a0, 1 -; CHECK-RV64-V-NEXT: mv a1, a0 -; CHECK-RV64-V-NEXT: slli a0, a0, 2 -; CHECK-RV64-V-NEXT: add a1, a1, a0 -; CHECK-RV64-V-NEXT: slli a0, a0, 1 -; CHECK-RV64-V-NEXT: add a0, a0, a1 -; CHECK-RV64-V-NEXT: sub a0, s0, a0 -; CHECK-RV64-V-NEXT: addi a0, a0, -160 -; CHECK-RV64-V-NEXT: vl1r.v 
v25, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV64-V-NEXT: csrr a0, vlenb -; CHECK-RV64-V-NEXT: mv a1, a0 -; CHECK-RV64-V-NEXT: slli a0, a0, 1 -; CHECK-RV64-V-NEXT: add a1, a1, a0 -; CHECK-RV64-V-NEXT: slli a0, a0, 2 -; CHECK-RV64-V-NEXT: add a1, a1, a0 -; CHECK-RV64-V-NEXT: slli a0, a0, 1 -; CHECK-RV64-V-NEXT: add a0, a0, a1 -; CHECK-RV64-V-NEXT: sub a0, s0, a0 -; CHECK-RV64-V-NEXT: addi a0, a0, -160 -; CHECK-RV64-V-NEXT: vl1r.v v26, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV64-V-NEXT: csrr a0, vlenb -; CHECK-RV64-V-NEXT: slli a0, a0, 2 -; CHECK-RV64-V-NEXT: mv a1, a0 -; CHECK-RV64-V-NEXT: slli a0, a0, 1 -; CHECK-RV64-V-NEXT: add a1, a1, a0 -; CHECK-RV64-V-NEXT: slli a0, a0, 1 -; CHECK-RV64-V-NEXT: add a0, a0, a1 -; CHECK-RV64-V-NEXT: sub a0, s0, a0 -; CHECK-RV64-V-NEXT: addi a0, a0, -160 -; CHECK-RV64-V-NEXT: vl1r.v v27, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV64-V-NEXT: csrr a0, vlenb -; CHECK-RV64-V-NEXT: mv a1, a0 -; CHECK-RV64-V-NEXT: slli a0, a0, 2 -; CHECK-RV64-V-NEXT: add a1, a1, a0 -; CHECK-RV64-V-NEXT: slli a0, a0, 1 -; CHECK-RV64-V-NEXT: add a1, a1, a0 -; CHECK-RV64-V-NEXT: slli a0, a0, 1 -; CHECK-RV64-V-NEXT: add a0, a0, a1 -; CHECK-RV64-V-NEXT: sub a0, s0, a0 -; CHECK-RV64-V-NEXT: addi a0, a0, -160 -; CHECK-RV64-V-NEXT: vl1r.v v28, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV64-V-NEXT: csrr a0, vlenb -; CHECK-RV64-V-NEXT: slli a0, a0, 1 -; CHECK-RV64-V-NEXT: mv a1, a0 -; CHECK-RV64-V-NEXT: slli a0, a0, 1 -; CHECK-RV64-V-NEXT: add a1, a1, a0 -; CHECK-RV64-V-NEXT: slli a0, a0, 1 -; CHECK-RV64-V-NEXT: add a1, a1, a0 -; CHECK-RV64-V-NEXT: slli a0, a0, 1 -; CHECK-RV64-V-NEXT: add a0, a0, a1 -; CHECK-RV64-V-NEXT: sub a0, s0, a0 -; CHECK-RV64-V-NEXT: addi a0, a0, -160 -; CHECK-RV64-V-NEXT: vl1r.v v29, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV64-V-NEXT: csrr a0, vlenb -; CHECK-RV64-V-NEXT: slli a1, a0, 5 -; CHECK-RV64-V-NEXT: sub a0, a1, a0 -; CHECK-RV64-V-NEXT: sub a0, s0, a0 -; CHECK-RV64-V-NEXT: addi a0, a0, -160 -; CHECK-RV64-V-NEXT: vl1r.v v30, (a0) # vscale x 8-byte Folded Reload +; CHECK-RV64-V-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload ; CHECK-RV64-V-NEXT: csrr a0, vlenb ; CHECK-RV64-V-NEXT: slli a0, a0, 5 ; CHECK-RV64-V-NEXT: sub a0, s0, a0 ; CHECK-RV64-V-NEXT: addi a0, a0, -160 -; CHECK-RV64-V-NEXT: vl1r.v v31, (a0) # vscale x 8-byte Folded Reload +; CHECK-RV64-V-NEXT: vl8r.v v24, (a0) # vscale x 64-byte Folded Reload ; CHECK-RV64-V-NEXT: addi sp, s0, -160 ; CHECK-RV64-V-NEXT: ld ra, 152(sp) # 8-byte Folded Reload ; CHECK-RV64-V-NEXT: ld t0, 144(sp) # 8-byte Folded Reload @@ -8750,172 +4230,15 @@ define void @foo_fp_with_call() #2 { ; CHECK-RV64-FV-NEXT: slli a0, a0, 5 ; CHECK-RV64-FV-NEXT: sub sp, sp, a0 ; CHECK-RV64-FV-NEXT: csrr a0, vlenb -; CHECK-RV64-FV-NEXT: sub a0, s0, a0 -; CHECK-RV64-FV-NEXT: addi a0, a0, -240 -; CHECK-RV64-FV-NEXT: vs1r.v v0, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV64-FV-NEXT: csrr a0, vlenb -; CHECK-RV64-FV-NEXT: slli a0, a0, 1 -; CHECK-RV64-FV-NEXT: sub a0, s0, a0 -; CHECK-RV64-FV-NEXT: addi a0, a0, -240 -; CHECK-RV64-FV-NEXT: vs1r.v v1, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV64-FV-NEXT: csrr a0, vlenb -; CHECK-RV64-FV-NEXT: slli a1, a0, 1 -; CHECK-RV64-FV-NEXT: add a0, a1, a0 -; CHECK-RV64-FV-NEXT: sub a0, s0, a0 -; CHECK-RV64-FV-NEXT: addi a0, a0, -240 -; CHECK-RV64-FV-NEXT: vs1r.v v2, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV64-FV-NEXT: csrr a0, vlenb -; CHECK-RV64-FV-NEXT: slli a0, a0, 2 -; CHECK-RV64-FV-NEXT: sub a0, s0, a0 -; CHECK-RV64-FV-NEXT: addi a0, a0, -240 -; 
CHECK-RV64-FV-NEXT: vs1r.v v3, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV64-FV-NEXT: csrr a0, vlenb -; CHECK-RV64-FV-NEXT: slli a1, a0, 2 -; CHECK-RV64-FV-NEXT: add a0, a1, a0 -; CHECK-RV64-FV-NEXT: sub a0, s0, a0 -; CHECK-RV64-FV-NEXT: addi a0, a0, -240 -; CHECK-RV64-FV-NEXT: vs1r.v v4, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV64-FV-NEXT: csrr a0, vlenb -; CHECK-RV64-FV-NEXT: slli a0, a0, 1 -; CHECK-RV64-FV-NEXT: mv a1, a0 -; CHECK-RV64-FV-NEXT: slli a0, a0, 1 -; CHECK-RV64-FV-NEXT: add a0, a0, a1 -; CHECK-RV64-FV-NEXT: sub a0, s0, a0 -; CHECK-RV64-FV-NEXT: addi a0, a0, -240 -; CHECK-RV64-FV-NEXT: vs1r.v v5, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV64-FV-NEXT: csrr a0, vlenb -; CHECK-RV64-FV-NEXT: slli a1, a0, 3 -; CHECK-RV64-FV-NEXT: sub a0, a1, a0 -; CHECK-RV64-FV-NEXT: sub a0, s0, a0 -; CHECK-RV64-FV-NEXT: addi a0, a0, -240 -; CHECK-RV64-FV-NEXT: vs1r.v v6, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV64-FV-NEXT: csrr a0, vlenb ; CHECK-RV64-FV-NEXT: slli a0, a0, 3 ; CHECK-RV64-FV-NEXT: sub a0, s0, a0 ; CHECK-RV64-FV-NEXT: addi a0, a0, -240 -; CHECK-RV64-FV-NEXT: vs1r.v v7, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV64-FV-NEXT: csrr a0, vlenb -; CHECK-RV64-FV-NEXT: slli a1, a0, 3 -; CHECK-RV64-FV-NEXT: add a0, a1, a0 -; CHECK-RV64-FV-NEXT: sub a0, s0, a0 -; CHECK-RV64-FV-NEXT: addi a0, a0, -240 -; CHECK-RV64-FV-NEXT: vs1r.v v8, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV64-FV-NEXT: csrr a0, vlenb -; CHECK-RV64-FV-NEXT: slli a0, a0, 1 -; CHECK-RV64-FV-NEXT: mv a1, a0 -; CHECK-RV64-FV-NEXT: slli a0, a0, 2 -; CHECK-RV64-FV-NEXT: add a0, a0, a1 -; CHECK-RV64-FV-NEXT: sub a0, s0, a0 -; CHECK-RV64-FV-NEXT: addi a0, a0, -240 -; CHECK-RV64-FV-NEXT: vs1r.v v9, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV64-FV-NEXT: csrr a0, vlenb -; CHECK-RV64-FV-NEXT: mv a1, a0 -; CHECK-RV64-FV-NEXT: slli a0, a0, 1 -; CHECK-RV64-FV-NEXT: add a1, a1, a0 -; CHECK-RV64-FV-NEXT: slli a0, a0, 2 -; CHECK-RV64-FV-NEXT: add a0, a0, a1 -; CHECK-RV64-FV-NEXT: sub a0, s0, a0 -; CHECK-RV64-FV-NEXT: addi a0, a0, -240 -; CHECK-RV64-FV-NEXT: vs1r.v v10, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV64-FV-NEXT: csrr a0, vlenb -; CHECK-RV64-FV-NEXT: slli a0, a0, 2 -; CHECK-RV64-FV-NEXT: mv a1, a0 -; CHECK-RV64-FV-NEXT: slli a0, a0, 1 -; CHECK-RV64-FV-NEXT: add a0, a0, a1 -; CHECK-RV64-FV-NEXT: sub a0, s0, a0 -; CHECK-RV64-FV-NEXT: addi a0, a0, -240 -; CHECK-RV64-FV-NEXT: vs1r.v v11, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV64-FV-NEXT: csrr a0, vlenb -; CHECK-RV64-FV-NEXT: mv a1, a0 -; CHECK-RV64-FV-NEXT: slli a0, a0, 2 -; CHECK-RV64-FV-NEXT: add a1, a1, a0 -; CHECK-RV64-FV-NEXT: slli a0, a0, 1 -; CHECK-RV64-FV-NEXT: add a0, a0, a1 -; CHECK-RV64-FV-NEXT: sub a0, s0, a0 -; CHECK-RV64-FV-NEXT: addi a0, a0, -240 -; CHECK-RV64-FV-NEXT: vs1r.v v12, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV64-FV-NEXT: csrr a0, vlenb -; CHECK-RV64-FV-NEXT: slli a0, a0, 1 -; CHECK-RV64-FV-NEXT: mv a1, a0 -; CHECK-RV64-FV-NEXT: slli a0, a0, 1 -; CHECK-RV64-FV-NEXT: add a1, a1, a0 -; CHECK-RV64-FV-NEXT: slli a0, a0, 1 -; CHECK-RV64-FV-NEXT: add a0, a0, a1 -; CHECK-RV64-FV-NEXT: sub a0, s0, a0 -; CHECK-RV64-FV-NEXT: addi a0, a0, -240 -; CHECK-RV64-FV-NEXT: vs1r.v v13, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV64-FV-NEXT: csrr a0, vlenb -; CHECK-RV64-FV-NEXT: slli a1, a0, 4 -; CHECK-RV64-FV-NEXT: sub a0, a1, a0 -; CHECK-RV64-FV-NEXT: sub a0, s0, a0 -; CHECK-RV64-FV-NEXT: addi a0, a0, -240 -; CHECK-RV64-FV-NEXT: vs1r.v v14, (a0) # vscale x 8-byte Folded Spill +; CHECK-RV64-FV-NEXT: vs8r.v v0, (a0) # vscale x 
64-byte Folded Spill ; CHECK-RV64-FV-NEXT: csrr a0, vlenb ; CHECK-RV64-FV-NEXT: slli a0, a0, 4 ; CHECK-RV64-FV-NEXT: sub a0, s0, a0 ; CHECK-RV64-FV-NEXT: addi a0, a0, -240 -; CHECK-RV64-FV-NEXT: vs1r.v v15, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV64-FV-NEXT: csrr a0, vlenb -; CHECK-RV64-FV-NEXT: slli a1, a0, 4 -; CHECK-RV64-FV-NEXT: add a0, a1, a0 -; CHECK-RV64-FV-NEXT: sub a0, s0, a0 -; CHECK-RV64-FV-NEXT: addi a0, a0, -240 -; CHECK-RV64-FV-NEXT: vs1r.v v16, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV64-FV-NEXT: csrr a0, vlenb -; CHECK-RV64-FV-NEXT: slli a0, a0, 1 -; CHECK-RV64-FV-NEXT: mv a1, a0 -; CHECK-RV64-FV-NEXT: slli a0, a0, 3 -; CHECK-RV64-FV-NEXT: add a0, a0, a1 -; CHECK-RV64-FV-NEXT: sub a0, s0, a0 -; CHECK-RV64-FV-NEXT: addi a0, a0, -240 -; CHECK-RV64-FV-NEXT: vs1r.v v17, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV64-FV-NEXT: csrr a0, vlenb -; CHECK-RV64-FV-NEXT: mv a1, a0 -; CHECK-RV64-FV-NEXT: slli a0, a0, 1 -; CHECK-RV64-FV-NEXT: add a1, a1, a0 -; CHECK-RV64-FV-NEXT: slli a0, a0, 3 -; CHECK-RV64-FV-NEXT: add a0, a0, a1 -; CHECK-RV64-FV-NEXT: sub a0, s0, a0 -; CHECK-RV64-FV-NEXT: addi a0, a0, -240 -; CHECK-RV64-FV-NEXT: vs1r.v v18, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV64-FV-NEXT: csrr a0, vlenb -; CHECK-RV64-FV-NEXT: slli a0, a0, 2 -; CHECK-RV64-FV-NEXT: mv a1, a0 -; CHECK-RV64-FV-NEXT: slli a0, a0, 2 -; CHECK-RV64-FV-NEXT: add a0, a0, a1 -; CHECK-RV64-FV-NEXT: sub a0, s0, a0 -; CHECK-RV64-FV-NEXT: addi a0, a0, -240 -; CHECK-RV64-FV-NEXT: vs1r.v v19, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV64-FV-NEXT: csrr a0, vlenb -; CHECK-RV64-FV-NEXT: mv a1, a0 -; CHECK-RV64-FV-NEXT: slli a0, a0, 2 -; CHECK-RV64-FV-NEXT: add a1, a1, a0 -; CHECK-RV64-FV-NEXT: slli a0, a0, 2 -; CHECK-RV64-FV-NEXT: add a0, a0, a1 -; CHECK-RV64-FV-NEXT: sub a0, s0, a0 -; CHECK-RV64-FV-NEXT: addi a0, a0, -240 -; CHECK-RV64-FV-NEXT: vs1r.v v20, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV64-FV-NEXT: csrr a0, vlenb -; CHECK-RV64-FV-NEXT: slli a0, a0, 1 -; CHECK-RV64-FV-NEXT: mv a1, a0 -; CHECK-RV64-FV-NEXT: slli a0, a0, 1 -; CHECK-RV64-FV-NEXT: add a1, a1, a0 -; CHECK-RV64-FV-NEXT: slli a0, a0, 2 -; CHECK-RV64-FV-NEXT: add a0, a0, a1 -; CHECK-RV64-FV-NEXT: sub a0, s0, a0 -; CHECK-RV64-FV-NEXT: addi a0, a0, -240 -; CHECK-RV64-FV-NEXT: vs1r.v v21, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV64-FV-NEXT: csrr a0, vlenb -; CHECK-RV64-FV-NEXT: mv a1, a0 -; CHECK-RV64-FV-NEXT: slli a0, a0, 1 -; CHECK-RV64-FV-NEXT: add a1, a1, a0 -; CHECK-RV64-FV-NEXT: slli a0, a0, 1 -; CHECK-RV64-FV-NEXT: add a1, a1, a0 -; CHECK-RV64-FV-NEXT: slli a0, a0, 2 -; CHECK-RV64-FV-NEXT: add a0, a0, a1 -; CHECK-RV64-FV-NEXT: sub a0, s0, a0 -; CHECK-RV64-FV-NEXT: addi a0, a0, -240 -; CHECK-RV64-FV-NEXT: vs1r.v v22, (a0) # vscale x 8-byte Folded Spill +; CHECK-RV64-FV-NEXT: vs8r.v v8, (a0) # vscale x 64-byte Folded Spill ; CHECK-RV64-FV-NEXT: csrr a0, vlenb ; CHECK-RV64-FV-NEXT: slli a0, a0, 3 ; CHECK-RV64-FV-NEXT: mv a1, a0 @@ -8923,331 +4246,36 @@ define void @foo_fp_with_call() #2 { ; CHECK-RV64-FV-NEXT: add a0, a0, a1 ; CHECK-RV64-FV-NEXT: sub a0, s0, a0 ; CHECK-RV64-FV-NEXT: addi a0, a0, -240 -; CHECK-RV64-FV-NEXT: vs1r.v v23, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV64-FV-NEXT: csrr a0, vlenb -; CHECK-RV64-FV-NEXT: mv a1, a0 -; CHECK-RV64-FV-NEXT: slli a0, a0, 3 -; CHECK-RV64-FV-NEXT: add a1, a1, a0 -; CHECK-RV64-FV-NEXT: slli a0, a0, 1 -; CHECK-RV64-FV-NEXT: add a0, a0, a1 -; CHECK-RV64-FV-NEXT: sub a0, s0, a0 -; CHECK-RV64-FV-NEXT: addi a0, a0, -240 -; CHECK-RV64-FV-NEXT: vs1r.v v24, (a0) # 
vscale x 8-byte Folded Spill -; CHECK-RV64-FV-NEXT: csrr a0, vlenb -; CHECK-RV64-FV-NEXT: slli a0, a0, 1 -; CHECK-RV64-FV-NEXT: mv a1, a0 -; CHECK-RV64-FV-NEXT: slli a0, a0, 2 -; CHECK-RV64-FV-NEXT: add a1, a1, a0 -; CHECK-RV64-FV-NEXT: slli a0, a0, 1 -; CHECK-RV64-FV-NEXT: add a0, a0, a1 -; CHECK-RV64-FV-NEXT: sub a0, s0, a0 -; CHECK-RV64-FV-NEXT: addi a0, a0, -240 -; CHECK-RV64-FV-NEXT: vs1r.v v25, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV64-FV-NEXT: csrr a0, vlenb -; CHECK-RV64-FV-NEXT: mv a1, a0 -; CHECK-RV64-FV-NEXT: slli a0, a0, 1 -; CHECK-RV64-FV-NEXT: add a1, a1, a0 -; CHECK-RV64-FV-NEXT: slli a0, a0, 2 -; CHECK-RV64-FV-NEXT: add a1, a1, a0 -; CHECK-RV64-FV-NEXT: slli a0, a0, 1 -; CHECK-RV64-FV-NEXT: add a0, a0, a1 -; CHECK-RV64-FV-NEXT: sub a0, s0, a0 -; CHECK-RV64-FV-NEXT: addi a0, a0, -240 -; CHECK-RV64-FV-NEXT: vs1r.v v26, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV64-FV-NEXT: csrr a0, vlenb -; CHECK-RV64-FV-NEXT: slli a0, a0, 2 -; CHECK-RV64-FV-NEXT: mv a1, a0 -; CHECK-RV64-FV-NEXT: slli a0, a0, 1 -; CHECK-RV64-FV-NEXT: add a1, a1, a0 -; CHECK-RV64-FV-NEXT: slli a0, a0, 1 -; CHECK-RV64-FV-NEXT: add a0, a0, a1 -; CHECK-RV64-FV-NEXT: sub a0, s0, a0 -; CHECK-RV64-FV-NEXT: addi a0, a0, -240 -; CHECK-RV64-FV-NEXT: vs1r.v v27, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV64-FV-NEXT: csrr a0, vlenb -; CHECK-RV64-FV-NEXT: mv a1, a0 -; CHECK-RV64-FV-NEXT: slli a0, a0, 2 -; CHECK-RV64-FV-NEXT: add a1, a1, a0 -; CHECK-RV64-FV-NEXT: slli a0, a0, 1 -; CHECK-RV64-FV-NEXT: add a1, a1, a0 -; CHECK-RV64-FV-NEXT: slli a0, a0, 1 -; CHECK-RV64-FV-NEXT: add a0, a0, a1 -; CHECK-RV64-FV-NEXT: sub a0, s0, a0 -; CHECK-RV64-FV-NEXT: addi a0, a0, -240 -; CHECK-RV64-FV-NEXT: vs1r.v v28, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV64-FV-NEXT: csrr a0, vlenb -; CHECK-RV64-FV-NEXT: slli a0, a0, 1 -; CHECK-RV64-FV-NEXT: mv a1, a0 -; CHECK-RV64-FV-NEXT: slli a0, a0, 1 -; CHECK-RV64-FV-NEXT: add a1, a1, a0 -; CHECK-RV64-FV-NEXT: slli a0, a0, 1 -; CHECK-RV64-FV-NEXT: add a1, a1, a0 -; CHECK-RV64-FV-NEXT: slli a0, a0, 1 -; CHECK-RV64-FV-NEXT: add a0, a0, a1 -; CHECK-RV64-FV-NEXT: sub a0, s0, a0 -; CHECK-RV64-FV-NEXT: addi a0, a0, -240 -; CHECK-RV64-FV-NEXT: vs1r.v v29, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV64-FV-NEXT: csrr a0, vlenb -; CHECK-RV64-FV-NEXT: slli a1, a0, 5 -; CHECK-RV64-FV-NEXT: sub a0, a1, a0 -; CHECK-RV64-FV-NEXT: sub a0, s0, a0 -; CHECK-RV64-FV-NEXT: addi a0, a0, -240 -; CHECK-RV64-FV-NEXT: vs1r.v v30, (a0) # vscale x 8-byte Folded Spill +; CHECK-RV64-FV-NEXT: vs8r.v v16, (a0) # vscale x 64-byte Folded Spill ; CHECK-RV64-FV-NEXT: csrr a0, vlenb ; CHECK-RV64-FV-NEXT: slli a0, a0, 5 ; CHECK-RV64-FV-NEXT: sub a0, s0, a0 ; CHECK-RV64-FV-NEXT: addi a0, a0, -240 -; CHECK-RV64-FV-NEXT: vs1r.v v31, (a0) # vscale x 8-byte Folded Spill +; CHECK-RV64-FV-NEXT: vs8r.v v24, (a0) # vscale x 64-byte Folded Spill ; CHECK-RV64-FV-NEXT: call otherfoo ; CHECK-RV64-FV-NEXT: csrr a0, vlenb -; CHECK-RV64-FV-NEXT: sub a0, s0, a0 -; CHECK-RV64-FV-NEXT: addi a0, a0, -240 -; CHECK-RV64-FV-NEXT: vl1r.v v0, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV64-FV-NEXT: csrr a0, vlenb -; CHECK-RV64-FV-NEXT: slli a0, a0, 1 -; CHECK-RV64-FV-NEXT: sub a0, s0, a0 -; CHECK-RV64-FV-NEXT: addi a0, a0, -240 -; CHECK-RV64-FV-NEXT: vl1r.v v1, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV64-FV-NEXT: csrr a0, vlenb -; CHECK-RV64-FV-NEXT: slli a1, a0, 1 -; CHECK-RV64-FV-NEXT: add a0, a1, a0 -; CHECK-RV64-FV-NEXT: sub a0, s0, a0 -; CHECK-RV64-FV-NEXT: addi a0, a0, -240 -; CHECK-RV64-FV-NEXT: vl1r.v v2, (a0) # 
vscale x 8-byte Folded Reload -; CHECK-RV64-FV-NEXT: csrr a0, vlenb -; CHECK-RV64-FV-NEXT: slli a0, a0, 2 -; CHECK-RV64-FV-NEXT: sub a0, s0, a0 -; CHECK-RV64-FV-NEXT: addi a0, a0, -240 -; CHECK-RV64-FV-NEXT: vl1r.v v3, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV64-FV-NEXT: csrr a0, vlenb -; CHECK-RV64-FV-NEXT: slli a1, a0, 2 -; CHECK-RV64-FV-NEXT: add a0, a1, a0 -; CHECK-RV64-FV-NEXT: sub a0, s0, a0 -; CHECK-RV64-FV-NEXT: addi a0, a0, -240 -; CHECK-RV64-FV-NEXT: vl1r.v v4, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV64-FV-NEXT: csrr a0, vlenb -; CHECK-RV64-FV-NEXT: slli a0, a0, 1 -; CHECK-RV64-FV-NEXT: mv a1, a0 -; CHECK-RV64-FV-NEXT: slli a0, a0, 1 -; CHECK-RV64-FV-NEXT: add a0, a0, a1 -; CHECK-RV64-FV-NEXT: sub a0, s0, a0 -; CHECK-RV64-FV-NEXT: addi a0, a0, -240 -; CHECK-RV64-FV-NEXT: vl1r.v v5, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV64-FV-NEXT: csrr a0, vlenb -; CHECK-RV64-FV-NEXT: slli a1, a0, 3 -; CHECK-RV64-FV-NEXT: sub a0, a1, a0 -; CHECK-RV64-FV-NEXT: sub a0, s0, a0 -; CHECK-RV64-FV-NEXT: addi a0, a0, -240 -; CHECK-RV64-FV-NEXT: vl1r.v v6, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV64-FV-NEXT: csrr a0, vlenb ; CHECK-RV64-FV-NEXT: slli a0, a0, 3 ; CHECK-RV64-FV-NEXT: sub a0, s0, a0 ; CHECK-RV64-FV-NEXT: addi a0, a0, -240 -; CHECK-RV64-FV-NEXT: vl1r.v v7, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV64-FV-NEXT: csrr a0, vlenb -; CHECK-RV64-FV-NEXT: slli a1, a0, 3 -; CHECK-RV64-FV-NEXT: add a0, a1, a0 -; CHECK-RV64-FV-NEXT: sub a0, s0, a0 -; CHECK-RV64-FV-NEXT: addi a0, a0, -240 -; CHECK-RV64-FV-NEXT: vl1r.v v8, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV64-FV-NEXT: csrr a0, vlenb -; CHECK-RV64-FV-NEXT: slli a0, a0, 1 -; CHECK-RV64-FV-NEXT: mv a1, a0 -; CHECK-RV64-FV-NEXT: slli a0, a0, 2 -; CHECK-RV64-FV-NEXT: add a0, a0, a1 -; CHECK-RV64-FV-NEXT: sub a0, s0, a0 -; CHECK-RV64-FV-NEXT: addi a0, a0, -240 -; CHECK-RV64-FV-NEXT: vl1r.v v9, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV64-FV-NEXT: csrr a0, vlenb -; CHECK-RV64-FV-NEXT: mv a1, a0 -; CHECK-RV64-FV-NEXT: slli a0, a0, 1 -; CHECK-RV64-FV-NEXT: add a1, a1, a0 -; CHECK-RV64-FV-NEXT: slli a0, a0, 2 -; CHECK-RV64-FV-NEXT: add a0, a0, a1 -; CHECK-RV64-FV-NEXT: sub a0, s0, a0 -; CHECK-RV64-FV-NEXT: addi a0, a0, -240 -; CHECK-RV64-FV-NEXT: vl1r.v v10, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV64-FV-NEXT: csrr a0, vlenb -; CHECK-RV64-FV-NEXT: slli a0, a0, 2 -; CHECK-RV64-FV-NEXT: mv a1, a0 -; CHECK-RV64-FV-NEXT: slli a0, a0, 1 -; CHECK-RV64-FV-NEXT: add a0, a0, a1 -; CHECK-RV64-FV-NEXT: sub a0, s0, a0 -; CHECK-RV64-FV-NEXT: addi a0, a0, -240 -; CHECK-RV64-FV-NEXT: vl1r.v v11, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV64-FV-NEXT: csrr a0, vlenb -; CHECK-RV64-FV-NEXT: mv a1, a0 -; CHECK-RV64-FV-NEXT: slli a0, a0, 2 -; CHECK-RV64-FV-NEXT: add a1, a1, a0 -; CHECK-RV64-FV-NEXT: slli a0, a0, 1 -; CHECK-RV64-FV-NEXT: add a0, a0, a1 -; CHECK-RV64-FV-NEXT: sub a0, s0, a0 -; CHECK-RV64-FV-NEXT: addi a0, a0, -240 -; CHECK-RV64-FV-NEXT: vl1r.v v12, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV64-FV-NEXT: csrr a0, vlenb -; CHECK-RV64-FV-NEXT: slli a0, a0, 1 -; CHECK-RV64-FV-NEXT: mv a1, a0 -; CHECK-RV64-FV-NEXT: slli a0, a0, 1 -; CHECK-RV64-FV-NEXT: add a1, a1, a0 -; CHECK-RV64-FV-NEXT: slli a0, a0, 1 -; CHECK-RV64-FV-NEXT: add a0, a0, a1 -; CHECK-RV64-FV-NEXT: sub a0, s0, a0 -; CHECK-RV64-FV-NEXT: addi a0, a0, -240 -; CHECK-RV64-FV-NEXT: vl1r.v v13, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV64-FV-NEXT: csrr a0, vlenb -; CHECK-RV64-FV-NEXT: slli a1, a0, 4 -; CHECK-RV64-FV-NEXT: sub a0, a1, a0 -; 
CHECK-RV64-FV-NEXT: sub a0, s0, a0 -; CHECK-RV64-FV-NEXT: addi a0, a0, -240 -; CHECK-RV64-FV-NEXT: vl1r.v v14, (a0) # vscale x 8-byte Folded Reload +; CHECK-RV64-FV-NEXT: vl8r.v v0, (a0) # vscale x 64-byte Folded Reload ; CHECK-RV64-FV-NEXT: csrr a0, vlenb ; CHECK-RV64-FV-NEXT: slli a0, a0, 4 ; CHECK-RV64-FV-NEXT: sub a0, s0, a0 ; CHECK-RV64-FV-NEXT: addi a0, a0, -240 -; CHECK-RV64-FV-NEXT: vl1r.v v15, (a0) # vscale x 8-byte Folded Reload +; CHECK-RV64-FV-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload ; CHECK-RV64-FV-NEXT: csrr a0, vlenb -; CHECK-RV64-FV-NEXT: slli a1, a0, 4 -; CHECK-RV64-FV-NEXT: add a0, a1, a0 -; CHECK-RV64-FV-NEXT: sub a0, s0, a0 -; CHECK-RV64-FV-NEXT: addi a0, a0, -240 -; CHECK-RV64-FV-NEXT: vl1r.v v16, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV64-FV-NEXT: csrr a0, vlenb -; CHECK-RV64-FV-NEXT: slli a0, a0, 1 -; CHECK-RV64-FV-NEXT: mv a1, a0 ; CHECK-RV64-FV-NEXT: slli a0, a0, 3 -; CHECK-RV64-FV-NEXT: add a0, a0, a1 -; CHECK-RV64-FV-NEXT: sub a0, s0, a0 -; CHECK-RV64-FV-NEXT: addi a0, a0, -240 -; CHECK-RV64-FV-NEXT: vl1r.v v17, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV64-FV-NEXT: csrr a0, vlenb -; CHECK-RV64-FV-NEXT: mv a1, a0 -; CHECK-RV64-FV-NEXT: slli a0, a0, 1 -; CHECK-RV64-FV-NEXT: add a1, a1, a0 -; CHECK-RV64-FV-NEXT: slli a0, a0, 3 -; CHECK-RV64-FV-NEXT: add a0, a0, a1 -; CHECK-RV64-FV-NEXT: sub a0, s0, a0 -; CHECK-RV64-FV-NEXT: addi a0, a0, -240 -; CHECK-RV64-FV-NEXT: vl1r.v v18, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV64-FV-NEXT: csrr a0, vlenb -; CHECK-RV64-FV-NEXT: slli a0, a0, 2 -; CHECK-RV64-FV-NEXT: mv a1, a0 -; CHECK-RV64-FV-NEXT: slli a0, a0, 2 -; CHECK-RV64-FV-NEXT: add a0, a0, a1 -; CHECK-RV64-FV-NEXT: sub a0, s0, a0 -; CHECK-RV64-FV-NEXT: addi a0, a0, -240 -; CHECK-RV64-FV-NEXT: vl1r.v v19, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV64-FV-NEXT: csrr a0, vlenb -; CHECK-RV64-FV-NEXT: mv a1, a0 -; CHECK-RV64-FV-NEXT: slli a0, a0, 2 -; CHECK-RV64-FV-NEXT: add a1, a1, a0 -; CHECK-RV64-FV-NEXT: slli a0, a0, 2 -; CHECK-RV64-FV-NEXT: add a0, a0, a1 -; CHECK-RV64-FV-NEXT: sub a0, s0, a0 -; CHECK-RV64-FV-NEXT: addi a0, a0, -240 -; CHECK-RV64-FV-NEXT: vl1r.v v20, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV64-FV-NEXT: csrr a0, vlenb -; CHECK-RV64-FV-NEXT: slli a0, a0, 1 -; CHECK-RV64-FV-NEXT: mv a1, a0 -; CHECK-RV64-FV-NEXT: slli a0, a0, 1 -; CHECK-RV64-FV-NEXT: add a1, a1, a0 -; CHECK-RV64-FV-NEXT: slli a0, a0, 2 -; CHECK-RV64-FV-NEXT: add a0, a0, a1 -; CHECK-RV64-FV-NEXT: sub a0, s0, a0 -; CHECK-RV64-FV-NEXT: addi a0, a0, -240 -; CHECK-RV64-FV-NEXT: vl1r.v v21, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV64-FV-NEXT: csrr a0, vlenb -; CHECK-RV64-FV-NEXT: mv a1, a0 -; CHECK-RV64-FV-NEXT: slli a0, a0, 1 -; CHECK-RV64-FV-NEXT: add a1, a1, a0 -; CHECK-RV64-FV-NEXT: slli a0, a0, 1 -; CHECK-RV64-FV-NEXT: add a1, a1, a0 -; CHECK-RV64-FV-NEXT: slli a0, a0, 2 -; CHECK-RV64-FV-NEXT: add a0, a0, a1 -; CHECK-RV64-FV-NEXT: sub a0, s0, a0 -; CHECK-RV64-FV-NEXT: addi a0, a0, -240 -; CHECK-RV64-FV-NEXT: vl1r.v v22, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV64-FV-NEXT: csrr a0, vlenb -; CHECK-RV64-FV-NEXT: slli a0, a0, 3 -; CHECK-RV64-FV-NEXT: mv a1, a0 -; CHECK-RV64-FV-NEXT: slli a0, a0, 1 -; CHECK-RV64-FV-NEXT: add a0, a0, a1 -; CHECK-RV64-FV-NEXT: sub a0, s0, a0 -; CHECK-RV64-FV-NEXT: addi a0, a0, -240 -; CHECK-RV64-FV-NEXT: vl1r.v v23, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV64-FV-NEXT: csrr a0, vlenb -; CHECK-RV64-FV-NEXT: mv a1, a0 -; CHECK-RV64-FV-NEXT: slli a0, a0, 3 -; CHECK-RV64-FV-NEXT: add a1, a1, a0 -; 
CHECK-RV64-FV-NEXT: slli a0, a0, 1 -; CHECK-RV64-FV-NEXT: add a0, a0, a1 -; CHECK-RV64-FV-NEXT: sub a0, s0, a0 -; CHECK-RV64-FV-NEXT: addi a0, a0, -240 -; CHECK-RV64-FV-NEXT: vl1r.v v24, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV64-FV-NEXT: csrr a0, vlenb -; CHECK-RV64-FV-NEXT: slli a0, a0, 1 ; CHECK-RV64-FV-NEXT: mv a1, a0 -; CHECK-RV64-FV-NEXT: slli a0, a0, 2 -; CHECK-RV64-FV-NEXT: add a1, a1, a0 ; CHECK-RV64-FV-NEXT: slli a0, a0, 1 ; CHECK-RV64-FV-NEXT: add a0, a0, a1 ; CHECK-RV64-FV-NEXT: sub a0, s0, a0 ; CHECK-RV64-FV-NEXT: addi a0, a0, -240 -; CHECK-RV64-FV-NEXT: vl1r.v v25, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV64-FV-NEXT: csrr a0, vlenb -; CHECK-RV64-FV-NEXT: mv a1, a0 -; CHECK-RV64-FV-NEXT: slli a0, a0, 1 -; CHECK-RV64-FV-NEXT: add a1, a1, a0 -; CHECK-RV64-FV-NEXT: slli a0, a0, 2 -; CHECK-RV64-FV-NEXT: add a1, a1, a0 -; CHECK-RV64-FV-NEXT: slli a0, a0, 1 -; CHECK-RV64-FV-NEXT: add a0, a0, a1 -; CHECK-RV64-FV-NEXT: sub a0, s0, a0 -; CHECK-RV64-FV-NEXT: addi a0, a0, -240 -; CHECK-RV64-FV-NEXT: vl1r.v v26, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV64-FV-NEXT: csrr a0, vlenb -; CHECK-RV64-FV-NEXT: slli a0, a0, 2 -; CHECK-RV64-FV-NEXT: mv a1, a0 -; CHECK-RV64-FV-NEXT: slli a0, a0, 1 -; CHECK-RV64-FV-NEXT: add a1, a1, a0 -; CHECK-RV64-FV-NEXT: slli a0, a0, 1 -; CHECK-RV64-FV-NEXT: add a0, a0, a1 -; CHECK-RV64-FV-NEXT: sub a0, s0, a0 -; CHECK-RV64-FV-NEXT: addi a0, a0, -240 -; CHECK-RV64-FV-NEXT: vl1r.v v27, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV64-FV-NEXT: csrr a0, vlenb -; CHECK-RV64-FV-NEXT: mv a1, a0 -; CHECK-RV64-FV-NEXT: slli a0, a0, 2 -; CHECK-RV64-FV-NEXT: add a1, a1, a0 -; CHECK-RV64-FV-NEXT: slli a0, a0, 1 -; CHECK-RV64-FV-NEXT: add a1, a1, a0 -; CHECK-RV64-FV-NEXT: slli a0, a0, 1 -; CHECK-RV64-FV-NEXT: add a0, a0, a1 -; CHECK-RV64-FV-NEXT: sub a0, s0, a0 -; CHECK-RV64-FV-NEXT: addi a0, a0, -240 -; CHECK-RV64-FV-NEXT: vl1r.v v28, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV64-FV-NEXT: csrr a0, vlenb -; CHECK-RV64-FV-NEXT: slli a0, a0, 1 -; CHECK-RV64-FV-NEXT: mv a1, a0 -; CHECK-RV64-FV-NEXT: slli a0, a0, 1 -; CHECK-RV64-FV-NEXT: add a1, a1, a0 -; CHECK-RV64-FV-NEXT: slli a0, a0, 1 -; CHECK-RV64-FV-NEXT: add a1, a1, a0 -; CHECK-RV64-FV-NEXT: slli a0, a0, 1 -; CHECK-RV64-FV-NEXT: add a0, a0, a1 -; CHECK-RV64-FV-NEXT: sub a0, s0, a0 -; CHECK-RV64-FV-NEXT: addi a0, a0, -240 -; CHECK-RV64-FV-NEXT: vl1r.v v29, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV64-FV-NEXT: csrr a0, vlenb -; CHECK-RV64-FV-NEXT: slli a1, a0, 5 -; CHECK-RV64-FV-NEXT: sub a0, a1, a0 -; CHECK-RV64-FV-NEXT: sub a0, s0, a0 -; CHECK-RV64-FV-NEXT: addi a0, a0, -240 -; CHECK-RV64-FV-NEXT: vl1r.v v30, (a0) # vscale x 8-byte Folded Reload +; CHECK-RV64-FV-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload ; CHECK-RV64-FV-NEXT: csrr a0, vlenb ; CHECK-RV64-FV-NEXT: slli a0, a0, 5 ; CHECK-RV64-FV-NEXT: sub a0, s0, a0 ; CHECK-RV64-FV-NEXT: addi a0, a0, -240 -; CHECK-RV64-FV-NEXT: vl1r.v v31, (a0) # vscale x 8-byte Folded Reload +; CHECK-RV64-FV-NEXT: vl8r.v v24, (a0) # vscale x 64-byte Folded Reload ; CHECK-RV64-FV-NEXT: addi sp, s0, -240 ; CHECK-RV64-FV-NEXT: ld ra, 232(sp) # 8-byte Folded Reload ; CHECK-RV64-FV-NEXT: ld t0, 224(sp) # 8-byte Folded Reload @@ -9334,172 +4362,15 @@ define void @foo_fp_with_call() #2 { ; CHECK-RV64-FDV-NEXT: slli a0, a0, 5 ; CHECK-RV64-FDV-NEXT: sub sp, sp, a0 ; CHECK-RV64-FDV-NEXT: csrr a0, vlenb -; CHECK-RV64-FDV-NEXT: sub a0, s0, a0 -; CHECK-RV64-FDV-NEXT: addi a0, a0, -320 -; CHECK-RV64-FDV-NEXT: vs1r.v v0, (a0) # vscale x 8-byte Folded Spill 
-; CHECK-RV64-FDV-NEXT: csrr a0, vlenb -; CHECK-RV64-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV64-FDV-NEXT: sub a0, s0, a0 -; CHECK-RV64-FDV-NEXT: addi a0, a0, -320 -; CHECK-RV64-FDV-NEXT: vs1r.v v1, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV64-FDV-NEXT: csrr a0, vlenb -; CHECK-RV64-FDV-NEXT: slli a1, a0, 1 -; CHECK-RV64-FDV-NEXT: add a0, a1, a0 -; CHECK-RV64-FDV-NEXT: sub a0, s0, a0 -; CHECK-RV64-FDV-NEXT: addi a0, a0, -320 -; CHECK-RV64-FDV-NEXT: vs1r.v v2, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV64-FDV-NEXT: csrr a0, vlenb -; CHECK-RV64-FDV-NEXT: slli a0, a0, 2 -; CHECK-RV64-FDV-NEXT: sub a0, s0, a0 -; CHECK-RV64-FDV-NEXT: addi a0, a0, -320 -; CHECK-RV64-FDV-NEXT: vs1r.v v3, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV64-FDV-NEXT: csrr a0, vlenb -; CHECK-RV64-FDV-NEXT: slli a1, a0, 2 -; CHECK-RV64-FDV-NEXT: add a0, a1, a0 -; CHECK-RV64-FDV-NEXT: sub a0, s0, a0 -; CHECK-RV64-FDV-NEXT: addi a0, a0, -320 -; CHECK-RV64-FDV-NEXT: vs1r.v v4, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV64-FDV-NEXT: csrr a0, vlenb -; CHECK-RV64-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV64-FDV-NEXT: mv a1, a0 -; CHECK-RV64-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV64-FDV-NEXT: add a0, a0, a1 -; CHECK-RV64-FDV-NEXT: sub a0, s0, a0 -; CHECK-RV64-FDV-NEXT: addi a0, a0, -320 -; CHECK-RV64-FDV-NEXT: vs1r.v v5, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV64-FDV-NEXT: csrr a0, vlenb -; CHECK-RV64-FDV-NEXT: slli a1, a0, 3 -; CHECK-RV64-FDV-NEXT: sub a0, a1, a0 -; CHECK-RV64-FDV-NEXT: sub a0, s0, a0 -; CHECK-RV64-FDV-NEXT: addi a0, a0, -320 -; CHECK-RV64-FDV-NEXT: vs1r.v v6, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV64-FDV-NEXT: csrr a0, vlenb ; CHECK-RV64-FDV-NEXT: slli a0, a0, 3 ; CHECK-RV64-FDV-NEXT: sub a0, s0, a0 ; CHECK-RV64-FDV-NEXT: addi a0, a0, -320 -; CHECK-RV64-FDV-NEXT: vs1r.v v7, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV64-FDV-NEXT: csrr a0, vlenb -; CHECK-RV64-FDV-NEXT: slli a1, a0, 3 -; CHECK-RV64-FDV-NEXT: add a0, a1, a0 -; CHECK-RV64-FDV-NEXT: sub a0, s0, a0 -; CHECK-RV64-FDV-NEXT: addi a0, a0, -320 -; CHECK-RV64-FDV-NEXT: vs1r.v v8, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV64-FDV-NEXT: csrr a0, vlenb -; CHECK-RV64-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV64-FDV-NEXT: mv a1, a0 -; CHECK-RV64-FDV-NEXT: slli a0, a0, 2 -; CHECK-RV64-FDV-NEXT: add a0, a0, a1 -; CHECK-RV64-FDV-NEXT: sub a0, s0, a0 -; CHECK-RV64-FDV-NEXT: addi a0, a0, -320 -; CHECK-RV64-FDV-NEXT: vs1r.v v9, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV64-FDV-NEXT: csrr a0, vlenb -; CHECK-RV64-FDV-NEXT: mv a1, a0 -; CHECK-RV64-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV64-FDV-NEXT: add a1, a1, a0 -; CHECK-RV64-FDV-NEXT: slli a0, a0, 2 -; CHECK-RV64-FDV-NEXT: add a0, a0, a1 -; CHECK-RV64-FDV-NEXT: sub a0, s0, a0 -; CHECK-RV64-FDV-NEXT: addi a0, a0, -320 -; CHECK-RV64-FDV-NEXT: vs1r.v v10, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV64-FDV-NEXT: csrr a0, vlenb -; CHECK-RV64-FDV-NEXT: slli a0, a0, 2 -; CHECK-RV64-FDV-NEXT: mv a1, a0 -; CHECK-RV64-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV64-FDV-NEXT: add a0, a0, a1 -; CHECK-RV64-FDV-NEXT: sub a0, s0, a0 -; CHECK-RV64-FDV-NEXT: addi a0, a0, -320 -; CHECK-RV64-FDV-NEXT: vs1r.v v11, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV64-FDV-NEXT: csrr a0, vlenb -; CHECK-RV64-FDV-NEXT: mv a1, a0 -; CHECK-RV64-FDV-NEXT: slli a0, a0, 2 -; CHECK-RV64-FDV-NEXT: add a1, a1, a0 -; CHECK-RV64-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV64-FDV-NEXT: add a0, a0, a1 -; CHECK-RV64-FDV-NEXT: sub a0, s0, a0 -; CHECK-RV64-FDV-NEXT: addi a0, a0, -320 -; CHECK-RV64-FDV-NEXT: vs1r.v v12, (a0) # vscale x 8-byte Folded Spill 
-; CHECK-RV64-FDV-NEXT: csrr a0, vlenb -; CHECK-RV64-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV64-FDV-NEXT: mv a1, a0 -; CHECK-RV64-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV64-FDV-NEXT: add a1, a1, a0 -; CHECK-RV64-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV64-FDV-NEXT: add a0, a0, a1 -; CHECK-RV64-FDV-NEXT: sub a0, s0, a0 -; CHECK-RV64-FDV-NEXT: addi a0, a0, -320 -; CHECK-RV64-FDV-NEXT: vs1r.v v13, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV64-FDV-NEXT: csrr a0, vlenb -; CHECK-RV64-FDV-NEXT: slli a1, a0, 4 -; CHECK-RV64-FDV-NEXT: sub a0, a1, a0 -; CHECK-RV64-FDV-NEXT: sub a0, s0, a0 -; CHECK-RV64-FDV-NEXT: addi a0, a0, -320 -; CHECK-RV64-FDV-NEXT: vs1r.v v14, (a0) # vscale x 8-byte Folded Spill +; CHECK-RV64-FDV-NEXT: vs8r.v v0, (a0) # vscale x 64-byte Folded Spill ; CHECK-RV64-FDV-NEXT: csrr a0, vlenb ; CHECK-RV64-FDV-NEXT: slli a0, a0, 4 ; CHECK-RV64-FDV-NEXT: sub a0, s0, a0 ; CHECK-RV64-FDV-NEXT: addi a0, a0, -320 -; CHECK-RV64-FDV-NEXT: vs1r.v v15, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV64-FDV-NEXT: csrr a0, vlenb -; CHECK-RV64-FDV-NEXT: slli a1, a0, 4 -; CHECK-RV64-FDV-NEXT: add a0, a1, a0 -; CHECK-RV64-FDV-NEXT: sub a0, s0, a0 -; CHECK-RV64-FDV-NEXT: addi a0, a0, -320 -; CHECK-RV64-FDV-NEXT: vs1r.v v16, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV64-FDV-NEXT: csrr a0, vlenb -; CHECK-RV64-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV64-FDV-NEXT: mv a1, a0 -; CHECK-RV64-FDV-NEXT: slli a0, a0, 3 -; CHECK-RV64-FDV-NEXT: add a0, a0, a1 -; CHECK-RV64-FDV-NEXT: sub a0, s0, a0 -; CHECK-RV64-FDV-NEXT: addi a0, a0, -320 -; CHECK-RV64-FDV-NEXT: vs1r.v v17, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV64-FDV-NEXT: csrr a0, vlenb -; CHECK-RV64-FDV-NEXT: mv a1, a0 -; CHECK-RV64-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV64-FDV-NEXT: add a1, a1, a0 -; CHECK-RV64-FDV-NEXT: slli a0, a0, 3 -; CHECK-RV64-FDV-NEXT: add a0, a0, a1 -; CHECK-RV64-FDV-NEXT: sub a0, s0, a0 -; CHECK-RV64-FDV-NEXT: addi a0, a0, -320 -; CHECK-RV64-FDV-NEXT: vs1r.v v18, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV64-FDV-NEXT: csrr a0, vlenb -; CHECK-RV64-FDV-NEXT: slli a0, a0, 2 -; CHECK-RV64-FDV-NEXT: mv a1, a0 -; CHECK-RV64-FDV-NEXT: slli a0, a0, 2 -; CHECK-RV64-FDV-NEXT: add a0, a0, a1 -; CHECK-RV64-FDV-NEXT: sub a0, s0, a0 -; CHECK-RV64-FDV-NEXT: addi a0, a0, -320 -; CHECK-RV64-FDV-NEXT: vs1r.v v19, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV64-FDV-NEXT: csrr a0, vlenb -; CHECK-RV64-FDV-NEXT: mv a1, a0 -; CHECK-RV64-FDV-NEXT: slli a0, a0, 2 -; CHECK-RV64-FDV-NEXT: add a1, a1, a0 -; CHECK-RV64-FDV-NEXT: slli a0, a0, 2 -; CHECK-RV64-FDV-NEXT: add a0, a0, a1 -; CHECK-RV64-FDV-NEXT: sub a0, s0, a0 -; CHECK-RV64-FDV-NEXT: addi a0, a0, -320 -; CHECK-RV64-FDV-NEXT: vs1r.v v20, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV64-FDV-NEXT: csrr a0, vlenb -; CHECK-RV64-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV64-FDV-NEXT: mv a1, a0 -; CHECK-RV64-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV64-FDV-NEXT: add a1, a1, a0 -; CHECK-RV64-FDV-NEXT: slli a0, a0, 2 -; CHECK-RV64-FDV-NEXT: add a0, a0, a1 -; CHECK-RV64-FDV-NEXT: sub a0, s0, a0 -; CHECK-RV64-FDV-NEXT: addi a0, a0, -320 -; CHECK-RV64-FDV-NEXT: vs1r.v v21, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV64-FDV-NEXT: csrr a0, vlenb -; CHECK-RV64-FDV-NEXT: mv a1, a0 -; CHECK-RV64-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV64-FDV-NEXT: add a1, a1, a0 -; CHECK-RV64-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV64-FDV-NEXT: add a1, a1, a0 -; CHECK-RV64-FDV-NEXT: slli a0, a0, 2 -; CHECK-RV64-FDV-NEXT: add a0, a0, a1 -; CHECK-RV64-FDV-NEXT: sub a0, s0, a0 -; CHECK-RV64-FDV-NEXT: addi a0, a0, -320 -; CHECK-RV64-FDV-NEXT: vs1r.v v22, 
(a0) # vscale x 8-byte Folded Spill +; CHECK-RV64-FDV-NEXT: vs8r.v v8, (a0) # vscale x 64-byte Folded Spill ; CHECK-RV64-FDV-NEXT: csrr a0, vlenb ; CHECK-RV64-FDV-NEXT: slli a0, a0, 3 ; CHECK-RV64-FDV-NEXT: mv a1, a0 @@ -9507,249 +4378,23 @@ define void @foo_fp_with_call() #2 { ; CHECK-RV64-FDV-NEXT: add a0, a0, a1 ; CHECK-RV64-FDV-NEXT: sub a0, s0, a0 ; CHECK-RV64-FDV-NEXT: addi a0, a0, -320 -; CHECK-RV64-FDV-NEXT: vs1r.v v23, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV64-FDV-NEXT: csrr a0, vlenb -; CHECK-RV64-FDV-NEXT: mv a1, a0 -; CHECK-RV64-FDV-NEXT: slli a0, a0, 3 -; CHECK-RV64-FDV-NEXT: add a1, a1, a0 -; CHECK-RV64-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV64-FDV-NEXT: add a0, a0, a1 -; CHECK-RV64-FDV-NEXT: sub a0, s0, a0 -; CHECK-RV64-FDV-NEXT: addi a0, a0, -320 -; CHECK-RV64-FDV-NEXT: vs1r.v v24, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV64-FDV-NEXT: csrr a0, vlenb -; CHECK-RV64-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV64-FDV-NEXT: mv a1, a0 -; CHECK-RV64-FDV-NEXT: slli a0, a0, 2 -; CHECK-RV64-FDV-NEXT: add a1, a1, a0 -; CHECK-RV64-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV64-FDV-NEXT: add a0, a0, a1 -; CHECK-RV64-FDV-NEXT: sub a0, s0, a0 -; CHECK-RV64-FDV-NEXT: addi a0, a0, -320 -; CHECK-RV64-FDV-NEXT: vs1r.v v25, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV64-FDV-NEXT: csrr a0, vlenb -; CHECK-RV64-FDV-NEXT: mv a1, a0 -; CHECK-RV64-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV64-FDV-NEXT: add a1, a1, a0 -; CHECK-RV64-FDV-NEXT: slli a0, a0, 2 -; CHECK-RV64-FDV-NEXT: add a1, a1, a0 -; CHECK-RV64-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV64-FDV-NEXT: add a0, a0, a1 -; CHECK-RV64-FDV-NEXT: sub a0, s0, a0 -; CHECK-RV64-FDV-NEXT: addi a0, a0, -320 -; CHECK-RV64-FDV-NEXT: vs1r.v v26, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV64-FDV-NEXT: csrr a0, vlenb -; CHECK-RV64-FDV-NEXT: slli a0, a0, 2 -; CHECK-RV64-FDV-NEXT: mv a1, a0 -; CHECK-RV64-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV64-FDV-NEXT: add a1, a1, a0 -; CHECK-RV64-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV64-FDV-NEXT: add a0, a0, a1 -; CHECK-RV64-FDV-NEXT: sub a0, s0, a0 -; CHECK-RV64-FDV-NEXT: addi a0, a0, -320 -; CHECK-RV64-FDV-NEXT: vs1r.v v27, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV64-FDV-NEXT: csrr a0, vlenb -; CHECK-RV64-FDV-NEXT: mv a1, a0 -; CHECK-RV64-FDV-NEXT: slli a0, a0, 2 -; CHECK-RV64-FDV-NEXT: add a1, a1, a0 -; CHECK-RV64-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV64-FDV-NEXT: add a1, a1, a0 -; CHECK-RV64-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV64-FDV-NEXT: add a0, a0, a1 -; CHECK-RV64-FDV-NEXT: sub a0, s0, a0 -; CHECK-RV64-FDV-NEXT: addi a0, a0, -320 -; CHECK-RV64-FDV-NEXT: vs1r.v v28, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV64-FDV-NEXT: csrr a0, vlenb -; CHECK-RV64-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV64-FDV-NEXT: mv a1, a0 -; CHECK-RV64-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV64-FDV-NEXT: add a1, a1, a0 -; CHECK-RV64-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV64-FDV-NEXT: add a1, a1, a0 -; CHECK-RV64-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV64-FDV-NEXT: add a0, a0, a1 -; CHECK-RV64-FDV-NEXT: sub a0, s0, a0 -; CHECK-RV64-FDV-NEXT: addi a0, a0, -320 -; CHECK-RV64-FDV-NEXT: vs1r.v v29, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV64-FDV-NEXT: csrr a0, vlenb -; CHECK-RV64-FDV-NEXT: slli a1, a0, 5 -; CHECK-RV64-FDV-NEXT: sub a0, a1, a0 -; CHECK-RV64-FDV-NEXT: sub a0, s0, a0 -; CHECK-RV64-FDV-NEXT: addi a0, a0, -320 -; CHECK-RV64-FDV-NEXT: vs1r.v v30, (a0) # vscale x 8-byte Folded Spill +; CHECK-RV64-FDV-NEXT: vs8r.v v16, (a0) # vscale x 64-byte Folded Spill ; CHECK-RV64-FDV-NEXT: csrr a0, vlenb ; CHECK-RV64-FDV-NEXT: slli a0, a0, 5 ; CHECK-RV64-FDV-NEXT: 
sub a0, s0, a0 ; CHECK-RV64-FDV-NEXT: addi a0, a0, -320 -; CHECK-RV64-FDV-NEXT: vs1r.v v31, (a0) # vscale x 8-byte Folded Spill +; CHECK-RV64-FDV-NEXT: vs8r.v v24, (a0) # vscale x 64-byte Folded Spill ; CHECK-RV64-FDV-NEXT: call otherfoo ; CHECK-RV64-FDV-NEXT: csrr a0, vlenb -; CHECK-RV64-FDV-NEXT: sub a0, s0, a0 -; CHECK-RV64-FDV-NEXT: addi a0, a0, -320 -; CHECK-RV64-FDV-NEXT: vl1r.v v0, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV64-FDV-NEXT: csrr a0, vlenb -; CHECK-RV64-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV64-FDV-NEXT: sub a0, s0, a0 -; CHECK-RV64-FDV-NEXT: addi a0, a0, -320 -; CHECK-RV64-FDV-NEXT: vl1r.v v1, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV64-FDV-NEXT: csrr a0, vlenb -; CHECK-RV64-FDV-NEXT: slli a1, a0, 1 -; CHECK-RV64-FDV-NEXT: add a0, a1, a0 -; CHECK-RV64-FDV-NEXT: sub a0, s0, a0 -; CHECK-RV64-FDV-NEXT: addi a0, a0, -320 -; CHECK-RV64-FDV-NEXT: vl1r.v v2, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV64-FDV-NEXT: csrr a0, vlenb -; CHECK-RV64-FDV-NEXT: slli a0, a0, 2 -; CHECK-RV64-FDV-NEXT: sub a0, s0, a0 -; CHECK-RV64-FDV-NEXT: addi a0, a0, -320 -; CHECK-RV64-FDV-NEXT: vl1r.v v3, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV64-FDV-NEXT: csrr a0, vlenb -; CHECK-RV64-FDV-NEXT: slli a1, a0, 2 -; CHECK-RV64-FDV-NEXT: add a0, a1, a0 -; CHECK-RV64-FDV-NEXT: sub a0, s0, a0 -; CHECK-RV64-FDV-NEXT: addi a0, a0, -320 -; CHECK-RV64-FDV-NEXT: vl1r.v v4, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV64-FDV-NEXT: csrr a0, vlenb -; CHECK-RV64-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV64-FDV-NEXT: mv a1, a0 -; CHECK-RV64-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV64-FDV-NEXT: add a0, a0, a1 -; CHECK-RV64-FDV-NEXT: sub a0, s0, a0 -; CHECK-RV64-FDV-NEXT: addi a0, a0, -320 -; CHECK-RV64-FDV-NEXT: vl1r.v v5, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV64-FDV-NEXT: csrr a0, vlenb -; CHECK-RV64-FDV-NEXT: slli a1, a0, 3 -; CHECK-RV64-FDV-NEXT: sub a0, a1, a0 -; CHECK-RV64-FDV-NEXT: sub a0, s0, a0 -; CHECK-RV64-FDV-NEXT: addi a0, a0, -320 -; CHECK-RV64-FDV-NEXT: vl1r.v v6, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV64-FDV-NEXT: csrr a0, vlenb ; CHECK-RV64-FDV-NEXT: slli a0, a0, 3 ; CHECK-RV64-FDV-NEXT: sub a0, s0, a0 ; CHECK-RV64-FDV-NEXT: addi a0, a0, -320 -; CHECK-RV64-FDV-NEXT: vl1r.v v7, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV64-FDV-NEXT: csrr a0, vlenb -; CHECK-RV64-FDV-NEXT: slli a1, a0, 3 -; CHECK-RV64-FDV-NEXT: add a0, a1, a0 -; CHECK-RV64-FDV-NEXT: sub a0, s0, a0 -; CHECK-RV64-FDV-NEXT: addi a0, a0, -320 -; CHECK-RV64-FDV-NEXT: vl1r.v v8, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV64-FDV-NEXT: csrr a0, vlenb -; CHECK-RV64-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV64-FDV-NEXT: mv a1, a0 -; CHECK-RV64-FDV-NEXT: slli a0, a0, 2 -; CHECK-RV64-FDV-NEXT: add a0, a0, a1 -; CHECK-RV64-FDV-NEXT: sub a0, s0, a0 -; CHECK-RV64-FDV-NEXT: addi a0, a0, -320 -; CHECK-RV64-FDV-NEXT: vl1r.v v9, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV64-FDV-NEXT: csrr a0, vlenb -; CHECK-RV64-FDV-NEXT: mv a1, a0 -; CHECK-RV64-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV64-FDV-NEXT: add a1, a1, a0 -; CHECK-RV64-FDV-NEXT: slli a0, a0, 2 -; CHECK-RV64-FDV-NEXT: add a0, a0, a1 -; CHECK-RV64-FDV-NEXT: sub a0, s0, a0 -; CHECK-RV64-FDV-NEXT: addi a0, a0, -320 -; CHECK-RV64-FDV-NEXT: vl1r.v v10, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV64-FDV-NEXT: csrr a0, vlenb -; CHECK-RV64-FDV-NEXT: slli a0, a0, 2 -; CHECK-RV64-FDV-NEXT: mv a1, a0 -; CHECK-RV64-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV64-FDV-NEXT: add a0, a0, a1 -; CHECK-RV64-FDV-NEXT: sub a0, s0, a0 -; CHECK-RV64-FDV-NEXT: addi a0, a0, -320 -; 
CHECK-RV64-FDV-NEXT: vl1r.v v11, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV64-FDV-NEXT: csrr a0, vlenb -; CHECK-RV64-FDV-NEXT: mv a1, a0 -; CHECK-RV64-FDV-NEXT: slli a0, a0, 2 -; CHECK-RV64-FDV-NEXT: add a1, a1, a0 -; CHECK-RV64-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV64-FDV-NEXT: add a0, a0, a1 -; CHECK-RV64-FDV-NEXT: sub a0, s0, a0 -; CHECK-RV64-FDV-NEXT: addi a0, a0, -320 -; CHECK-RV64-FDV-NEXT: vl1r.v v12, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV64-FDV-NEXT: csrr a0, vlenb -; CHECK-RV64-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV64-FDV-NEXT: mv a1, a0 -; CHECK-RV64-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV64-FDV-NEXT: add a1, a1, a0 -; CHECK-RV64-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV64-FDV-NEXT: add a0, a0, a1 -; CHECK-RV64-FDV-NEXT: sub a0, s0, a0 -; CHECK-RV64-FDV-NEXT: addi a0, a0, -320 -; CHECK-RV64-FDV-NEXT: vl1r.v v13, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV64-FDV-NEXT: csrr a0, vlenb -; CHECK-RV64-FDV-NEXT: slli a1, a0, 4 -; CHECK-RV64-FDV-NEXT: sub a0, a1, a0 -; CHECK-RV64-FDV-NEXT: sub a0, s0, a0 -; CHECK-RV64-FDV-NEXT: addi a0, a0, -320 -; CHECK-RV64-FDV-NEXT: vl1r.v v14, (a0) # vscale x 8-byte Folded Reload +; CHECK-RV64-FDV-NEXT: vl8r.v v0, (a0) # vscale x 64-byte Folded Reload ; CHECK-RV64-FDV-NEXT: csrr a0, vlenb ; CHECK-RV64-FDV-NEXT: slli a0, a0, 4 ; CHECK-RV64-FDV-NEXT: sub a0, s0, a0 ; CHECK-RV64-FDV-NEXT: addi a0, a0, -320 -; CHECK-RV64-FDV-NEXT: vl1r.v v15, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV64-FDV-NEXT: csrr a0, vlenb -; CHECK-RV64-FDV-NEXT: slli a1, a0, 4 -; CHECK-RV64-FDV-NEXT: add a0, a1, a0 -; CHECK-RV64-FDV-NEXT: sub a0, s0, a0 -; CHECK-RV64-FDV-NEXT: addi a0, a0, -320 -; CHECK-RV64-FDV-NEXT: vl1r.v v16, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV64-FDV-NEXT: csrr a0, vlenb -; CHECK-RV64-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV64-FDV-NEXT: mv a1, a0 -; CHECK-RV64-FDV-NEXT: slli a0, a0, 3 -; CHECK-RV64-FDV-NEXT: add a0, a0, a1 -; CHECK-RV64-FDV-NEXT: sub a0, s0, a0 -; CHECK-RV64-FDV-NEXT: addi a0, a0, -320 -; CHECK-RV64-FDV-NEXT: vl1r.v v17, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV64-FDV-NEXT: csrr a0, vlenb -; CHECK-RV64-FDV-NEXT: mv a1, a0 -; CHECK-RV64-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV64-FDV-NEXT: add a1, a1, a0 -; CHECK-RV64-FDV-NEXT: slli a0, a0, 3 -; CHECK-RV64-FDV-NEXT: add a0, a0, a1 -; CHECK-RV64-FDV-NEXT: sub a0, s0, a0 -; CHECK-RV64-FDV-NEXT: addi a0, a0, -320 -; CHECK-RV64-FDV-NEXT: vl1r.v v18, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV64-FDV-NEXT: csrr a0, vlenb -; CHECK-RV64-FDV-NEXT: slli a0, a0, 2 -; CHECK-RV64-FDV-NEXT: mv a1, a0 -; CHECK-RV64-FDV-NEXT: slli a0, a0, 2 -; CHECK-RV64-FDV-NEXT: add a0, a0, a1 -; CHECK-RV64-FDV-NEXT: sub a0, s0, a0 -; CHECK-RV64-FDV-NEXT: addi a0, a0, -320 -; CHECK-RV64-FDV-NEXT: vl1r.v v19, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV64-FDV-NEXT: csrr a0, vlenb -; CHECK-RV64-FDV-NEXT: mv a1, a0 -; CHECK-RV64-FDV-NEXT: slli a0, a0, 2 -; CHECK-RV64-FDV-NEXT: add a1, a1, a0 -; CHECK-RV64-FDV-NEXT: slli a0, a0, 2 -; CHECK-RV64-FDV-NEXT: add a0, a0, a1 -; CHECK-RV64-FDV-NEXT: sub a0, s0, a0 -; CHECK-RV64-FDV-NEXT: addi a0, a0, -320 -; CHECK-RV64-FDV-NEXT: vl1r.v v20, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV64-FDV-NEXT: csrr a0, vlenb -; CHECK-RV64-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV64-FDV-NEXT: mv a1, a0 -; CHECK-RV64-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV64-FDV-NEXT: add a1, a1, a0 -; CHECK-RV64-FDV-NEXT: slli a0, a0, 2 -; CHECK-RV64-FDV-NEXT: add a0, a0, a1 -; CHECK-RV64-FDV-NEXT: sub a0, s0, a0 -; CHECK-RV64-FDV-NEXT: addi a0, a0, -320 -; CHECK-RV64-FDV-NEXT: vl1r.v 
v21, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV64-FDV-NEXT: csrr a0, vlenb -; CHECK-RV64-FDV-NEXT: mv a1, a0 -; CHECK-RV64-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV64-FDV-NEXT: add a1, a1, a0 -; CHECK-RV64-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV64-FDV-NEXT: add a1, a1, a0 -; CHECK-RV64-FDV-NEXT: slli a0, a0, 2 -; CHECK-RV64-FDV-NEXT: add a0, a0, a1 -; CHECK-RV64-FDV-NEXT: sub a0, s0, a0 -; CHECK-RV64-FDV-NEXT: addi a0, a0, -320 -; CHECK-RV64-FDV-NEXT: vl1r.v v22, (a0) # vscale x 8-byte Folded Reload +; CHECK-RV64-FDV-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload ; CHECK-RV64-FDV-NEXT: csrr a0, vlenb ; CHECK-RV64-FDV-NEXT: slli a0, a0, 3 ; CHECK-RV64-FDV-NEXT: mv a1, a0 @@ -9757,81 +4402,12 @@ define void @foo_fp_with_call() #2 { ; CHECK-RV64-FDV-NEXT: add a0, a0, a1 ; CHECK-RV64-FDV-NEXT: sub a0, s0, a0 ; CHECK-RV64-FDV-NEXT: addi a0, a0, -320 -; CHECK-RV64-FDV-NEXT: vl1r.v v23, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV64-FDV-NEXT: csrr a0, vlenb -; CHECK-RV64-FDV-NEXT: mv a1, a0 -; CHECK-RV64-FDV-NEXT: slli a0, a0, 3 -; CHECK-RV64-FDV-NEXT: add a1, a1, a0 -; CHECK-RV64-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV64-FDV-NEXT: add a0, a0, a1 -; CHECK-RV64-FDV-NEXT: sub a0, s0, a0 -; CHECK-RV64-FDV-NEXT: addi a0, a0, -320 -; CHECK-RV64-FDV-NEXT: vl1r.v v24, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV64-FDV-NEXT: csrr a0, vlenb -; CHECK-RV64-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV64-FDV-NEXT: mv a1, a0 -; CHECK-RV64-FDV-NEXT: slli a0, a0, 2 -; CHECK-RV64-FDV-NEXT: add a1, a1, a0 -; CHECK-RV64-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV64-FDV-NEXT: add a0, a0, a1 -; CHECK-RV64-FDV-NEXT: sub a0, s0, a0 -; CHECK-RV64-FDV-NEXT: addi a0, a0, -320 -; CHECK-RV64-FDV-NEXT: vl1r.v v25, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV64-FDV-NEXT: csrr a0, vlenb -; CHECK-RV64-FDV-NEXT: mv a1, a0 -; CHECK-RV64-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV64-FDV-NEXT: add a1, a1, a0 -; CHECK-RV64-FDV-NEXT: slli a0, a0, 2 -; CHECK-RV64-FDV-NEXT: add a1, a1, a0 -; CHECK-RV64-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV64-FDV-NEXT: add a0, a0, a1 -; CHECK-RV64-FDV-NEXT: sub a0, s0, a0 -; CHECK-RV64-FDV-NEXT: addi a0, a0, -320 -; CHECK-RV64-FDV-NEXT: vl1r.v v26, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV64-FDV-NEXT: csrr a0, vlenb -; CHECK-RV64-FDV-NEXT: slli a0, a0, 2 -; CHECK-RV64-FDV-NEXT: mv a1, a0 -; CHECK-RV64-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV64-FDV-NEXT: add a1, a1, a0 -; CHECK-RV64-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV64-FDV-NEXT: add a0, a0, a1 -; CHECK-RV64-FDV-NEXT: sub a0, s0, a0 -; CHECK-RV64-FDV-NEXT: addi a0, a0, -320 -; CHECK-RV64-FDV-NEXT: vl1r.v v27, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV64-FDV-NEXT: csrr a0, vlenb -; CHECK-RV64-FDV-NEXT: mv a1, a0 -; CHECK-RV64-FDV-NEXT: slli a0, a0, 2 -; CHECK-RV64-FDV-NEXT: add a1, a1, a0 -; CHECK-RV64-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV64-FDV-NEXT: add a1, a1, a0 -; CHECK-RV64-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV64-FDV-NEXT: add a0, a0, a1 -; CHECK-RV64-FDV-NEXT: sub a0, s0, a0 -; CHECK-RV64-FDV-NEXT: addi a0, a0, -320 -; CHECK-RV64-FDV-NEXT: vl1r.v v28, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV64-FDV-NEXT: csrr a0, vlenb -; CHECK-RV64-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV64-FDV-NEXT: mv a1, a0 -; CHECK-RV64-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV64-FDV-NEXT: add a1, a1, a0 -; CHECK-RV64-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV64-FDV-NEXT: add a1, a1, a0 -; CHECK-RV64-FDV-NEXT: slli a0, a0, 1 -; CHECK-RV64-FDV-NEXT: add a0, a0, a1 -; CHECK-RV64-FDV-NEXT: sub a0, s0, a0 -; CHECK-RV64-FDV-NEXT: addi a0, a0, -320 -; CHECK-RV64-FDV-NEXT: vl1r.v v29, (a0) # 
vscale x 8-byte Folded Reload -; CHECK-RV64-FDV-NEXT: csrr a0, vlenb -; CHECK-RV64-FDV-NEXT: slli a1, a0, 5 -; CHECK-RV64-FDV-NEXT: sub a0, a1, a0 -; CHECK-RV64-FDV-NEXT: sub a0, s0, a0 -; CHECK-RV64-FDV-NEXT: addi a0, a0, -320 -; CHECK-RV64-FDV-NEXT: vl1r.v v30, (a0) # vscale x 8-byte Folded Reload +; CHECK-RV64-FDV-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload ; CHECK-RV64-FDV-NEXT: csrr a0, vlenb ; CHECK-RV64-FDV-NEXT: slli a0, a0, 5 ; CHECK-RV64-FDV-NEXT: sub a0, s0, a0 ; CHECK-RV64-FDV-NEXT: addi a0, a0, -320 -; CHECK-RV64-FDV-NEXT: vl1r.v v31, (a0) # vscale x 8-byte Folded Reload +; CHECK-RV64-FDV-NEXT: vl8r.v v24, (a0) # vscale x 64-byte Folded Reload ; CHECK-RV64-FDV-NEXT: addi sp, s0, -320 ; CHECK-RV64-FDV-NEXT: ld ra, 312(sp) # 8-byte Folded Reload ; CHECK-RV64-FDV-NEXT: ld t0, 304(sp) # 8-byte Folded Reload diff --git a/llvm/test/CodeGen/RISCV/intrinsic-cttz-elts-vscale.ll b/llvm/test/CodeGen/RISCV/intrinsic-cttz-elts-vscale.ll index 87c8343..a06c750 100644 --- a/llvm/test/CodeGen/RISCV/intrinsic-cttz-elts-vscale.ll +++ b/llvm/test/CodeGen/RISCV/intrinsic-cttz-elts-vscale.ll @@ -7,18 +7,18 @@ define i32 @ctz_nxv4i32(<vscale x 4 x i32> %a) #0 { ; RV32-LABEL: ctz_nxv4i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a0, zero, e16, m1, ta, ma -; RV32-NEXT: vid.v v10 -; RV32-NEXT: vmv.v.i v11, -1 ; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: vsetvli a1, zero, e16, m1, ta, ma +; RV32-NEXT: vid.v v10 +; RV32-NEXT: li a1, -1 ; RV32-NEXT: vsetvli zero, zero, e32, m2, ta, ma ; RV32-NEXT: vmsne.vi v0, v8, 0 ; RV32-NEXT: srli a0, a0, 1 ; RV32-NEXT: vsetvli zero, zero, e16, m1, ta, ma ; RV32-NEXT: vmv.v.x v8, a0 -; RV32-NEXT: vmacc.vv v8, v10, v11 -; RV32-NEXT: vmv.v.i v9, 0 -; RV32-NEXT: vmerge.vvm v8, v9, v8, v0 +; RV32-NEXT: vmadd.vx v10, a1, v8 +; RV32-NEXT: vmv.v.i v8, 0 +; RV32-NEXT: vmerge.vvm v8, v8, v10, v0 ; RV32-NEXT: vredmaxu.vs v8, v8, v8 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: sub a0, a0, a1 @@ -28,18 +28,18 @@ define i32 @ctz_nxv4i32(<vscale x 4 x i32> %a) #0 { ; ; RV64-LABEL: ctz_nxv4i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a0, zero, e16, m1, ta, ma -; RV64-NEXT: vid.v v10 -; RV64-NEXT: vmv.v.i v11, -1 ; RV64-NEXT: csrr a0, vlenb +; RV64-NEXT: vsetvli a1, zero, e16, m1, ta, ma +; RV64-NEXT: vid.v v10 +; RV64-NEXT: li a1, -1 ; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, ma ; RV64-NEXT: vmsne.vi v0, v8, 0 ; RV64-NEXT: srli a0, a0, 1 ; RV64-NEXT: vsetvli zero, zero, e16, m1, ta, ma ; RV64-NEXT: vmv.v.x v8, a0 -; RV64-NEXT: vmacc.vv v8, v10, v11 -; RV64-NEXT: vmv.v.i v9, 0 -; RV64-NEXT: vmerge.vvm v8, v9, v8, v0 +; RV64-NEXT: vmadd.vx v10, a1, v8 +; RV64-NEXT: vmv.v.i v8, 0 +; RV64-NEXT: vmerge.vvm v8, v8, v10, v0 ; RV64-NEXT: vredmaxu.vs v8, v8, v8 ; RV64-NEXT: vmv.x.s a1, v8 ; RV64-NEXT: sub a0, a0, a1 @@ -109,17 +109,17 @@ define i64 @ctz_nxv8i1_no_range(<vscale x 8 x i16> %a) { ; ; RV64-LABEL: ctz_nxv8i1_no_range: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a0, zero, e64, m8, ta, ma -; RV64-NEXT: vid.v v16 -; RV64-NEXT: vmv.v.i v24, -1 ; RV64-NEXT: csrr a0, vlenb +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma +; RV64-NEXT: vid.v v16 +; RV64-NEXT: li a1, -1 ; RV64-NEXT: vsetvli zero, zero, e16, m2, ta, ma ; RV64-NEXT: vmsne.vi v0, v8, 0 ; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma ; RV64-NEXT: vmv.v.x v8, a0 -; RV64-NEXT: vmacc.vv v8, v16, v24 -; RV64-NEXT: vmv.v.i v16, 0 -; RV64-NEXT: vmerge.vvm v8, v16, v8, v0 +; RV64-NEXT: vmadd.vx v16, a1, v8 +; RV64-NEXT: vmv.v.i v8, 0 +; RV64-NEXT: vmerge.vvm v8, v8, v16, v0 ; RV64-NEXT: vredmaxu.vs v8, v8, v8 ; RV64-NEXT: vmv.x.s 
a1, v8 ; RV64-NEXT: sub a0, a0, a1 diff --git a/llvm/test/CodeGen/RISCV/rvv/callee-saved-regs.ll b/llvm/test/CodeGen/RISCV/rvv/callee-saved-regs.ll index 96c349d..d166a6e 100644 --- a/llvm/test/CodeGen/RISCV/rvv/callee-saved-regs.ll +++ b/llvm/test/CodeGen/RISCV/rvv/callee-saved-regs.ll @@ -92,6 +92,150 @@ entry: ret <vscale x 1 x i32> %va } +define riscv_vector_cc <vscale x 1 x i32> @test_vector_callee2(<vscale x 1 x i32> %va) nounwind { +; SPILL-O2-LABEL: test_vector_callee2: +; SPILL-O2: # %bb.0: # %entry +; SPILL-O2-NEXT: addi sp, sp, -16 +; SPILL-O2-NEXT: csrr a0, vlenb +; SPILL-O2-NEXT: li a1, 12 +; SPILL-O2-NEXT: mul a0, a0, a1 +; SPILL-O2-NEXT: sub sp, sp, a0 +; SPILL-O2-NEXT: csrr a0, vlenb +; SPILL-O2-NEXT: li a1, 11 +; SPILL-O2-NEXT: mul a0, a0, a1 +; SPILL-O2-NEXT: add a0, sp, a0 +; SPILL-O2-NEXT: addi a0, a0, 16 +; SPILL-O2-NEXT: vs1r.v v1, (a0) # vscale x 8-byte Folded Spill +; SPILL-O2-NEXT: csrr a0, vlenb +; SPILL-O2-NEXT: li a1, 10 +; SPILL-O2-NEXT: mul a0, a0, a1 +; SPILL-O2-NEXT: add a0, sp, a0 +; SPILL-O2-NEXT: addi a0, a0, 16 +; SPILL-O2-NEXT: vs1r.v v3, (a0) # vscale x 8-byte Folded Spill +; SPILL-O2-NEXT: csrr a0, vlenb +; SPILL-O2-NEXT: slli a1, a0, 3 +; SPILL-O2-NEXT: add a0, a1, a0 +; SPILL-O2-NEXT: add a0, sp, a0 +; SPILL-O2-NEXT: addi a0, a0, 16 +; SPILL-O2-NEXT: vs1r.v v5, (a0) # vscale x 8-byte Folded Spill +; SPILL-O2-NEXT: csrr a0, vlenb +; SPILL-O2-NEXT: slli a0, a0, 3 +; SPILL-O2-NEXT: add a0, sp, a0 +; SPILL-O2-NEXT: addi a0, a0, 16 +; SPILL-O2-NEXT: vs1r.v v7, (a0) # vscale x 8-byte Folded Spill +; SPILL-O2-NEXT: addi a0, sp, 16 +; SPILL-O2-NEXT: vs8r.v v24, (a0) # vscale x 64-byte Folded Spill +; SPILL-O2-NEXT: #APP +; SPILL-O2-NEXT: #NO_APP +; SPILL-O2-NEXT: csrr a0, vlenb +; SPILL-O2-NEXT: li a1, 11 +; SPILL-O2-NEXT: mul a0, a0, a1 +; SPILL-O2-NEXT: add a0, sp, a0 +; SPILL-O2-NEXT: addi a0, a0, 16 +; SPILL-O2-NEXT: vl1r.v v1, (a0) # vscale x 8-byte Folded Reload +; SPILL-O2-NEXT: csrr a0, vlenb +; SPILL-O2-NEXT: li a1, 10 +; SPILL-O2-NEXT: mul a0, a0, a1 +; SPILL-O2-NEXT: add a0, sp, a0 +; SPILL-O2-NEXT: addi a0, a0, 16 +; SPILL-O2-NEXT: vl1r.v v3, (a0) # vscale x 8-byte Folded Reload +; SPILL-O2-NEXT: csrr a0, vlenb +; SPILL-O2-NEXT: slli a1, a0, 3 +; SPILL-O2-NEXT: add a0, a1, a0 +; SPILL-O2-NEXT: add a0, sp, a0 +; SPILL-O2-NEXT: addi a0, a0, 16 +; SPILL-O2-NEXT: vl1r.v v5, (a0) # vscale x 8-byte Folded Reload +; SPILL-O2-NEXT: csrr a0, vlenb +; SPILL-O2-NEXT: slli a0, a0, 3 +; SPILL-O2-NEXT: add a0, sp, a0 +; SPILL-O2-NEXT: addi a0, a0, 16 +; SPILL-O2-NEXT: vl1r.v v7, (a0) # vscale x 8-byte Folded Reload +; SPILL-O2-NEXT: addi a0, sp, 16 +; SPILL-O2-NEXT: vl8r.v v24, (a0) # vscale x 64-byte Folded Reload +; SPILL-O2-NEXT: csrr a0, vlenb +; SPILL-O2-NEXT: li a1, 12 +; SPILL-O2-NEXT: mul a0, a0, a1 +; SPILL-O2-NEXT: add sp, sp, a0 +; SPILL-O2-NEXT: addi sp, sp, 16 +; SPILL-O2-NEXT: ret +entry: + call void asm sideeffect "", + "~{v1},~{v3},~{v5},~{v7},~{v24m2},~{v25},~{v26},~{v27},~{v28},~{v29},~{v30},~{v31}"() + + ret <vscale x 1 x i32> %va +} + +define riscv_vector_cc <vscale x 1 x i32> @test_vector_callee3(<vscale x 1 x i32> %va) nounwind { +; SPILL-O2-LABEL: test_vector_callee3: +; SPILL-O2: # %bb.0: # %entry +; SPILL-O2-NEXT: addi sp, sp, -16 +; SPILL-O2-NEXT: csrr a0, vlenb +; SPILL-O2-NEXT: li a1, 10 +; SPILL-O2-NEXT: mul a0, a0, a1 +; SPILL-O2-NEXT: sub sp, sp, a0 +; SPILL-O2-NEXT: csrr a0, vlenb +; SPILL-O2-NEXT: slli a1, a0, 3 +; SPILL-O2-NEXT: add a0, a1, a0 +; SPILL-O2-NEXT: add a0, sp, a0 +; SPILL-O2-NEXT: addi a0, a0, 16 +; 
SPILL-O2-NEXT: vs1r.v v1, (a0) # vscale x 8-byte Folded Spill +; SPILL-O2-NEXT: csrr a0, vlenb +; SPILL-O2-NEXT: slli a0, a0, 3 +; SPILL-O2-NEXT: add a0, sp, a0 +; SPILL-O2-NEXT: addi a0, a0, 16 +; SPILL-O2-NEXT: vs1r.v v24, (a0) # vscale x 8-byte Folded Spill +; SPILL-O2-NEXT: csrr a0, vlenb +; SPILL-O2-NEXT: li a1, 6 +; SPILL-O2-NEXT: mul a0, a0, a1 +; SPILL-O2-NEXT: add a0, sp, a0 +; SPILL-O2-NEXT: addi a0, a0, 16 +; SPILL-O2-NEXT: vs2r.v v2, (a0) # vscale x 16-byte Folded Spill +; SPILL-O2-NEXT: csrr a0, vlenb +; SPILL-O2-NEXT: slli a0, a0, 2 +; SPILL-O2-NEXT: add a0, sp, a0 +; SPILL-O2-NEXT: addi a0, a0, 16 +; SPILL-O2-NEXT: vs2r.v v26, (a0) # vscale x 16-byte Folded Spill +; SPILL-O2-NEXT: addi a0, sp, 16 +; SPILL-O2-NEXT: vs4r.v v28, (a0) # vscale x 32-byte Folded Spill +; SPILL-O2-NEXT: #APP +; SPILL-O2-NEXT: #NO_APP +; SPILL-O2-NEXT: csrr a0, vlenb +; SPILL-O2-NEXT: slli a1, a0, 3 +; SPILL-O2-NEXT: add a0, a1, a0 +; SPILL-O2-NEXT: add a0, sp, a0 +; SPILL-O2-NEXT: addi a0, a0, 16 +; SPILL-O2-NEXT: vl1r.v v1, (a0) # vscale x 8-byte Folded Reload +; SPILL-O2-NEXT: csrr a0, vlenb +; SPILL-O2-NEXT: slli a0, a0, 3 +; SPILL-O2-NEXT: add a0, sp, a0 +; SPILL-O2-NEXT: addi a0, a0, 16 +; SPILL-O2-NEXT: vl1r.v v24, (a0) # vscale x 8-byte Folded Reload +; SPILL-O2-NEXT: csrr a0, vlenb +; SPILL-O2-NEXT: li a1, 6 +; SPILL-O2-NEXT: mul a0, a0, a1 +; SPILL-O2-NEXT: add a0, sp, a0 +; SPILL-O2-NEXT: addi a0, a0, 16 +; SPILL-O2-NEXT: vl2r.v v2, (a0) # vscale x 16-byte Folded Reload +; SPILL-O2-NEXT: csrr a0, vlenb +; SPILL-O2-NEXT: slli a0, a0, 2 +; SPILL-O2-NEXT: add a0, sp, a0 +; SPILL-O2-NEXT: addi a0, a0, 16 +; SPILL-O2-NEXT: vl2r.v v26, (a0) # vscale x 16-byte Folded Reload +; SPILL-O2-NEXT: addi a0, sp, 16 +; SPILL-O2-NEXT: vl4r.v v28, (a0) # vscale x 32-byte Folded Reload +; SPILL-O2-NEXT: csrr a0, vlenb +; SPILL-O2-NEXT: li a1, 10 +; SPILL-O2-NEXT: mul a0, a0, a1 +; SPILL-O2-NEXT: add sp, sp, a0 +; SPILL-O2-NEXT: addi sp, sp, 16 +; SPILL-O2-NEXT: ret +entry: + call void asm sideeffect "", + "~{v1},~{v2},~{v3},~{v24},~{v26m2},~{v28m2},~{v29},~{v30},~{v31}"() + + ret <vscale x 1 x i32> %va +} + ; Make sure the local stack allocation pass doesn't count vector registers. The ; sizes are chosen to be on the edge of what RISCVRegister::needsFrameBaseReg ; considers to need a virtual base register. 
diff --git a/llvm/test/CodeGen/RISCV/rvv/interrupt-attr-nocall.ll b/llvm/test/CodeGen/RISCV/rvv/interrupt-attr-nocall.ll index af2e8d3..42c2556 100644 --- a/llvm/test/CodeGen/RISCV/rvv/interrupt-attr-nocall.ll +++ b/llvm/test/CodeGen/RISCV/rvv/interrupt-attr-nocall.ll @@ -14,12 +14,8 @@ define void @foo_lmul1() nounwind #0 { ; CHECK-RV32-NEXT: csrr a0, vlenb ; CHECK-RV32-NEXT: slli a0, a0, 1 ; CHECK-RV32-NEXT: sub sp, sp, a0 -; CHECK-RV32-NEXT: csrr a0, vlenb -; CHECK-RV32-NEXT: add a0, sp, a0 -; CHECK-RV32-NEXT: addi a0, a0, 16 -; CHECK-RV32-NEXT: vs1r.v v8, (a0) # vscale x 8-byte Folded Spill ; CHECK-RV32-NEXT: addi a0, sp, 16 -; CHECK-RV32-NEXT: vs1r.v v9, (a0) # vscale x 8-byte Folded Spill +; CHECK-RV32-NEXT: vs2r.v v8, (a0) # vscale x 16-byte Folded Spill ; CHECK-RV32-NEXT: lui a0, %hi(a) ; CHECK-RV32-NEXT: addi a0, a0, %lo(a) ; CHECK-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma @@ -31,12 +27,8 @@ define void @foo_lmul1() nounwind #0 { ; CHECK-RV32-NEXT: lui a0, %hi(c) ; CHECK-RV32-NEXT: addi a0, a0, %lo(c) ; CHECK-RV32-NEXT: vse32.v v8, (a0) -; CHECK-RV32-NEXT: csrr a0, vlenb -; CHECK-RV32-NEXT: add a0, sp, a0 -; CHECK-RV32-NEXT: addi a0, a0, 16 -; CHECK-RV32-NEXT: vl1r.v v8, (a0) # vscale x 8-byte Folded Reload ; CHECK-RV32-NEXT: addi a0, sp, 16 -; CHECK-RV32-NEXT: vl1r.v v9, (a0) # vscale x 8-byte Folded Reload +; CHECK-RV32-NEXT: vl2r.v v8, (a0) # vscale x 16-byte Folded Reload ; CHECK-RV32-NEXT: csrr a0, vlenb ; CHECK-RV32-NEXT: slli a0, a0, 1 ; CHECK-RV32-NEXT: add sp, sp, a0 @@ -62,25 +54,8 @@ define void @foo_lmul2() nounwind #0 { ; CHECK-RV32-NEXT: csrr a0, vlenb ; CHECK-RV32-NEXT: slli a0, a0, 2 ; CHECK-RV32-NEXT: sub sp, sp, a0 -; CHECK-RV32-NEXT: csrr a0, vlenb -; CHECK-RV32-NEXT: sw a1, 4(sp) # 4-byte Folded Spill -; CHECK-RV32-NEXT: slli a1, a0, 1 -; CHECK-RV32-NEXT: add a0, a1, a0 -; CHECK-RV32-NEXT: lw a1, 4(sp) # 4-byte Folded Reload -; CHECK-RV32-NEXT: add a0, sp, a0 -; CHECK-RV32-NEXT: addi a0, a0, 16 -; CHECK-RV32-NEXT: vs1r.v v8, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-NEXT: csrr a0, vlenb -; CHECK-RV32-NEXT: slli a0, a0, 1 -; CHECK-RV32-NEXT: add a0, sp, a0 -; CHECK-RV32-NEXT: addi a0, a0, 16 -; CHECK-RV32-NEXT: vs1r.v v9, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-NEXT: csrr a0, vlenb -; CHECK-RV32-NEXT: add a0, sp, a0 -; CHECK-RV32-NEXT: addi a0, a0, 16 -; CHECK-RV32-NEXT: vs1r.v v10, (a0) # vscale x 8-byte Folded Spill ; CHECK-RV32-NEXT: addi a0, sp, 16 -; CHECK-RV32-NEXT: vs1r.v v11, (a0) # vscale x 8-byte Folded Spill +; CHECK-RV32-NEXT: vs4r.v v8, (a0) # vscale x 32-byte Folded Spill ; CHECK-RV32-NEXT: lui a0, %hi(d) ; CHECK-RV32-NEXT: addi a0, a0, %lo(d) ; CHECK-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma @@ -92,25 +67,8 @@ define void @foo_lmul2() nounwind #0 { ; CHECK-RV32-NEXT: lui a0, %hi(f) ; CHECK-RV32-NEXT: addi a0, a0, %lo(f) ; CHECK-RV32-NEXT: vse32.v v8, (a0) -; CHECK-RV32-NEXT: csrr a0, vlenb -; CHECK-RV32-NEXT: sw a1, 4(sp) # 4-byte Folded Spill -; CHECK-RV32-NEXT: slli a1, a0, 1 -; CHECK-RV32-NEXT: add a0, a1, a0 -; CHECK-RV32-NEXT: lw a1, 4(sp) # 4-byte Folded Reload -; CHECK-RV32-NEXT: add a0, sp, a0 -; CHECK-RV32-NEXT: addi a0, a0, 16 -; CHECK-RV32-NEXT: vl1r.v v8, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-NEXT: csrr a0, vlenb -; CHECK-RV32-NEXT: slli a0, a0, 1 -; CHECK-RV32-NEXT: add a0, sp, a0 -; CHECK-RV32-NEXT: addi a0, a0, 16 -; CHECK-RV32-NEXT: vl1r.v v9, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-NEXT: csrr a0, vlenb -; CHECK-RV32-NEXT: add a0, sp, a0 -; CHECK-RV32-NEXT: addi a0, a0, 16 -; 
CHECK-RV32-NEXT: vl1r.v v10, (a0) # vscale x 8-byte Folded Reload ; CHECK-RV32-NEXT: addi a0, sp, 16 -; CHECK-RV32-NEXT: vl1r.v v11, (a0) # vscale x 8-byte Folded Reload +; CHECK-RV32-NEXT: vl4r.v v8, (a0) # vscale x 32-byte Folded Reload ; CHECK-RV32-NEXT: csrr a0, vlenb ; CHECK-RV32-NEXT: slli a0, a0, 2 ; CHECK-RV32-NEXT: add sp, sp, a0 @@ -136,56 +94,8 @@ define void @foo_lmul4() nounwind #0 { ; CHECK-RV32-NEXT: csrr a0, vlenb ; CHECK-RV32-NEXT: slli a0, a0, 3 ; CHECK-RV32-NEXT: sub sp, sp, a0 -; CHECK-RV32-NEXT: csrr a0, vlenb -; CHECK-RV32-NEXT: sw a1, 4(sp) # 4-byte Folded Spill -; CHECK-RV32-NEXT: slli a1, a0, 3 -; CHECK-RV32-NEXT: sub a0, a1, a0 -; CHECK-RV32-NEXT: lw a1, 4(sp) # 4-byte Folded Reload -; CHECK-RV32-NEXT: add a0, sp, a0 -; CHECK-RV32-NEXT: addi a0, a0, 16 -; CHECK-RV32-NEXT: vs1r.v v8, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-NEXT: csrr a0, vlenb -; CHECK-RV32-NEXT: slli a0, a0, 1 -; CHECK-RV32-NEXT: sw a1, 4(sp) # 4-byte Folded Spill -; CHECK-RV32-NEXT: mv a1, a0 -; CHECK-RV32-NEXT: slli a0, a0, 1 -; CHECK-RV32-NEXT: add a0, a0, a1 -; CHECK-RV32-NEXT: lw a1, 4(sp) # 4-byte Folded Reload -; CHECK-RV32-NEXT: add a0, sp, a0 -; CHECK-RV32-NEXT: addi a0, a0, 16 -; CHECK-RV32-NEXT: vs1r.v v9, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-NEXT: csrr a0, vlenb -; CHECK-RV32-NEXT: sw a1, 4(sp) # 4-byte Folded Spill -; CHECK-RV32-NEXT: slli a1, a0, 2 -; CHECK-RV32-NEXT: add a0, a1, a0 -; CHECK-RV32-NEXT: lw a1, 4(sp) # 4-byte Folded Reload -; CHECK-RV32-NEXT: add a0, sp, a0 -; CHECK-RV32-NEXT: addi a0, a0, 16 -; CHECK-RV32-NEXT: vs1r.v v10, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-NEXT: csrr a0, vlenb -; CHECK-RV32-NEXT: slli a0, a0, 2 -; CHECK-RV32-NEXT: add a0, sp, a0 -; CHECK-RV32-NEXT: addi a0, a0, 16 -; CHECK-RV32-NEXT: vs1r.v v11, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-NEXT: csrr a0, vlenb -; CHECK-RV32-NEXT: sw a1, 4(sp) # 4-byte Folded Spill -; CHECK-RV32-NEXT: slli a1, a0, 1 -; CHECK-RV32-NEXT: add a0, a1, a0 -; CHECK-RV32-NEXT: lw a1, 4(sp) # 4-byte Folded Reload -; CHECK-RV32-NEXT: add a0, sp, a0 -; CHECK-RV32-NEXT: addi a0, a0, 16 -; CHECK-RV32-NEXT: vs1r.v v12, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-NEXT: csrr a0, vlenb -; CHECK-RV32-NEXT: slli a0, a0, 1 -; CHECK-RV32-NEXT: add a0, sp, a0 -; CHECK-RV32-NEXT: addi a0, a0, 16 -; CHECK-RV32-NEXT: vs1r.v v13, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-NEXT: csrr a0, vlenb -; CHECK-RV32-NEXT: add a0, sp, a0 -; CHECK-RV32-NEXT: addi a0, a0, 16 -; CHECK-RV32-NEXT: vs1r.v v14, (a0) # vscale x 8-byte Folded Spill ; CHECK-RV32-NEXT: addi a0, sp, 16 -; CHECK-RV32-NEXT: vs1r.v v15, (a0) # vscale x 8-byte Folded Spill +; CHECK-RV32-NEXT: vs8r.v v8, (a0) # vscale x 64-byte Folded Spill ; CHECK-RV32-NEXT: lui a0, %hi(g) ; CHECK-RV32-NEXT: addi a0, a0, %lo(g) ; CHECK-RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma @@ -197,50 +107,8 @@ define void @foo_lmul4() nounwind #0 { ; CHECK-RV32-NEXT: lui a0, %hi(i) ; CHECK-RV32-NEXT: addi a0, a0, %lo(i) ; CHECK-RV32-NEXT: vse32.v v8, (a0) -; CHECK-RV32-NEXT: sw a1, 4(sp) # 4-byte Folded Spill -; CHECK-RV32-NEXT: csrr a0, vlenb -; CHECK-RV32-NEXT: slli a1, a0, 3 -; CHECK-RV32-NEXT: sub a0, a1, a0 -; CHECK-RV32-NEXT: add a0, sp, a0 -; CHECK-RV32-NEXT: addi a0, a0, 16 -; CHECK-RV32-NEXT: vl1r.v v8, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-NEXT: csrr a0, vlenb -; CHECK-RV32-NEXT: slli a0, a0, 1 -; CHECK-RV32-NEXT: mv a1, a0 -; CHECK-RV32-NEXT: slli a0, a0, 1 -; CHECK-RV32-NEXT: add a0, a0, a1 -; CHECK-RV32-NEXT: add a0, sp, a0 -; 
CHECK-RV32-NEXT: addi a0, a0, 16 -; CHECK-RV32-NEXT: vl1r.v v9, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-NEXT: csrr a0, vlenb -; CHECK-RV32-NEXT: slli a1, a0, 2 -; CHECK-RV32-NEXT: add a0, a1, a0 -; CHECK-RV32-NEXT: add a0, sp, a0 -; CHECK-RV32-NEXT: addi a0, a0, 16 -; CHECK-RV32-NEXT: vl1r.v v10, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-NEXT: csrr a0, vlenb -; CHECK-RV32-NEXT: slli a0, a0, 2 -; CHECK-RV32-NEXT: add a0, sp, a0 -; CHECK-RV32-NEXT: addi a0, a0, 16 -; CHECK-RV32-NEXT: vl1r.v v11, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-NEXT: csrr a0, vlenb -; CHECK-RV32-NEXT: slli a1, a0, 1 -; CHECK-RV32-NEXT: add a0, a1, a0 -; CHECK-RV32-NEXT: lw a1, 4(sp) # 4-byte Folded Reload -; CHECK-RV32-NEXT: add a0, sp, a0 -; CHECK-RV32-NEXT: addi a0, a0, 16 -; CHECK-RV32-NEXT: vl1r.v v12, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-NEXT: csrr a0, vlenb -; CHECK-RV32-NEXT: slli a0, a0, 1 -; CHECK-RV32-NEXT: add a0, sp, a0 -; CHECK-RV32-NEXT: addi a0, a0, 16 -; CHECK-RV32-NEXT: vl1r.v v13, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-NEXT: csrr a0, vlenb -; CHECK-RV32-NEXT: add a0, sp, a0 -; CHECK-RV32-NEXT: addi a0, a0, 16 -; CHECK-RV32-NEXT: vl1r.v v14, (a0) # vscale x 8-byte Folded Reload ; CHECK-RV32-NEXT: addi a0, sp, 16 -; CHECK-RV32-NEXT: vl1r.v v15, (a0) # vscale x 8-byte Folded Reload +; CHECK-RV32-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload ; CHECK-RV32-NEXT: csrr a0, vlenb ; CHECK-RV32-NEXT: slli a0, a0, 3 ; CHECK-RV32-NEXT: add sp, sp, a0 @@ -268,108 +136,12 @@ define void @foo_lmul8() nounwind #0 { ; CHECK-RV32-NEXT: slli a0, a0, 4 ; CHECK-RV32-NEXT: sub sp, sp, a0 ; CHECK-RV32-NEXT: csrr a0, vlenb -; CHECK-RV32-NEXT: slli a1, a0, 4 -; CHECK-RV32-NEXT: sub a0, a1, a0 -; CHECK-RV32-NEXT: add a0, sp, a0 -; CHECK-RV32-NEXT: addi a0, a0, 16 -; CHECK-RV32-NEXT: vs1r.v v8, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-NEXT: csrr a0, vlenb -; CHECK-RV32-NEXT: slli a0, a0, 1 -; CHECK-RV32-NEXT: mv a1, a0 -; CHECK-RV32-NEXT: slli a0, a0, 1 -; CHECK-RV32-NEXT: add a1, a1, a0 -; CHECK-RV32-NEXT: slli a0, a0, 1 -; CHECK-RV32-NEXT: add a0, a0, a1 -; CHECK-RV32-NEXT: add a0, sp, a0 -; CHECK-RV32-NEXT: addi a0, a0, 16 -; CHECK-RV32-NEXT: vs1r.v v9, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-NEXT: csrr a0, vlenb -; CHECK-RV32-NEXT: mv a1, a0 -; CHECK-RV32-NEXT: slli a0, a0, 2 -; CHECK-RV32-NEXT: add a1, a1, a0 -; CHECK-RV32-NEXT: slli a0, a0, 1 -; CHECK-RV32-NEXT: add a0, a0, a1 -; CHECK-RV32-NEXT: add a0, sp, a0 -; CHECK-RV32-NEXT: addi a0, a0, 16 -; CHECK-RV32-NEXT: vs1r.v v10, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-NEXT: csrr a0, vlenb -; CHECK-RV32-NEXT: slli a0, a0, 2 -; CHECK-RV32-NEXT: mv a1, a0 -; CHECK-RV32-NEXT: slli a0, a0, 1 -; CHECK-RV32-NEXT: add a0, a0, a1 -; CHECK-RV32-NEXT: add a0, sp, a0 -; CHECK-RV32-NEXT: addi a0, a0, 16 -; CHECK-RV32-NEXT: vs1r.v v11, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-NEXT: csrr a0, vlenb -; CHECK-RV32-NEXT: mv a1, a0 -; CHECK-RV32-NEXT: slli a0, a0, 1 -; CHECK-RV32-NEXT: add a1, a1, a0 -; CHECK-RV32-NEXT: slli a0, a0, 2 -; CHECK-RV32-NEXT: add a0, a0, a1 -; CHECK-RV32-NEXT: add a0, sp, a0 -; CHECK-RV32-NEXT: addi a0, a0, 16 -; CHECK-RV32-NEXT: vs1r.v v12, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-NEXT: csrr a0, vlenb -; CHECK-RV32-NEXT: slli a0, a0, 1 -; CHECK-RV32-NEXT: mv a1, a0 -; CHECK-RV32-NEXT: slli a0, a0, 2 -; CHECK-RV32-NEXT: add a0, a0, a1 -; CHECK-RV32-NEXT: add a0, sp, a0 -; CHECK-RV32-NEXT: addi a0, a0, 16 -; CHECK-RV32-NEXT: vs1r.v v13, (a0) # vscale x 
8-byte Folded Spill -; CHECK-RV32-NEXT: csrr a0, vlenb -; CHECK-RV32-NEXT: slli a1, a0, 3 -; CHECK-RV32-NEXT: add a0, a1, a0 -; CHECK-RV32-NEXT: add a0, sp, a0 -; CHECK-RV32-NEXT: addi a0, a0, 16 -; CHECK-RV32-NEXT: vs1r.v v14, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-NEXT: csrr a0, vlenb ; CHECK-RV32-NEXT: slli a0, a0, 3 ; CHECK-RV32-NEXT: add a0, sp, a0 ; CHECK-RV32-NEXT: addi a0, a0, 16 -; CHECK-RV32-NEXT: vs1r.v v15, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-NEXT: csrr a0, vlenb -; CHECK-RV32-NEXT: slli a1, a0, 3 -; CHECK-RV32-NEXT: sub a0, a1, a0 -; CHECK-RV32-NEXT: add a0, sp, a0 -; CHECK-RV32-NEXT: addi a0, a0, 16 -; CHECK-RV32-NEXT: vs1r.v v16, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-NEXT: csrr a0, vlenb -; CHECK-RV32-NEXT: slli a0, a0, 1 -; CHECK-RV32-NEXT: mv a1, a0 -; CHECK-RV32-NEXT: slli a0, a0, 1 -; CHECK-RV32-NEXT: add a0, a0, a1 -; CHECK-RV32-NEXT: add a0, sp, a0 -; CHECK-RV32-NEXT: addi a0, a0, 16 -; CHECK-RV32-NEXT: vs1r.v v17, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-NEXT: csrr a0, vlenb -; CHECK-RV32-NEXT: slli a1, a0, 2 -; CHECK-RV32-NEXT: add a0, a1, a0 -; CHECK-RV32-NEXT: add a0, sp, a0 -; CHECK-RV32-NEXT: addi a0, a0, 16 -; CHECK-RV32-NEXT: vs1r.v v18, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-NEXT: csrr a0, vlenb -; CHECK-RV32-NEXT: slli a0, a0, 2 -; CHECK-RV32-NEXT: add a0, sp, a0 -; CHECK-RV32-NEXT: addi a0, a0, 16 -; CHECK-RV32-NEXT: vs1r.v v19, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-NEXT: csrr a0, vlenb -; CHECK-RV32-NEXT: slli a1, a0, 1 -; CHECK-RV32-NEXT: add a0, a1, a0 -; CHECK-RV32-NEXT: add a0, sp, a0 -; CHECK-RV32-NEXT: addi a0, a0, 16 -; CHECK-RV32-NEXT: vs1r.v v20, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-NEXT: csrr a0, vlenb -; CHECK-RV32-NEXT: slli a0, a0, 1 -; CHECK-RV32-NEXT: add a0, sp, a0 -; CHECK-RV32-NEXT: addi a0, a0, 16 -; CHECK-RV32-NEXT: vs1r.v v21, (a0) # vscale x 8-byte Folded Spill -; CHECK-RV32-NEXT: csrr a0, vlenb -; CHECK-RV32-NEXT: add a0, sp, a0 -; CHECK-RV32-NEXT: addi a0, a0, 16 -; CHECK-RV32-NEXT: vs1r.v v22, (a0) # vscale x 8-byte Folded Spill +; CHECK-RV32-NEXT: vs8r.v v8, (a0) # vscale x 64-byte Folded Spill ; CHECK-RV32-NEXT: addi a0, sp, 16 -; CHECK-RV32-NEXT: vs1r.v v23, (a0) # vscale x 8-byte Folded Spill +; CHECK-RV32-NEXT: vs8r.v v16, (a0) # vscale x 64-byte Folded Spill ; CHECK-RV32-NEXT: lui a0, %hi(j) ; CHECK-RV32-NEXT: addi a0, a0, %lo(j) ; CHECK-RV32-NEXT: li a1, 32 @@ -383,108 +155,12 @@ define void @foo_lmul8() nounwind #0 { ; CHECK-RV32-NEXT: addi a0, a0, %lo(l) ; CHECK-RV32-NEXT: vse32.v v8, (a0) ; CHECK-RV32-NEXT: csrr a0, vlenb -; CHECK-RV32-NEXT: slli a1, a0, 4 -; CHECK-RV32-NEXT: sub a0, a1, a0 -; CHECK-RV32-NEXT: add a0, sp, a0 -; CHECK-RV32-NEXT: addi a0, a0, 16 -; CHECK-RV32-NEXT: vl1r.v v8, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-NEXT: csrr a0, vlenb -; CHECK-RV32-NEXT: slli a0, a0, 1 -; CHECK-RV32-NEXT: mv a1, a0 -; CHECK-RV32-NEXT: slli a0, a0, 1 -; CHECK-RV32-NEXT: add a1, a1, a0 -; CHECK-RV32-NEXT: slli a0, a0, 1 -; CHECK-RV32-NEXT: add a0, a0, a1 -; CHECK-RV32-NEXT: add a0, sp, a0 -; CHECK-RV32-NEXT: addi a0, a0, 16 -; CHECK-RV32-NEXT: vl1r.v v9, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-NEXT: csrr a0, vlenb -; CHECK-RV32-NEXT: mv a1, a0 -; CHECK-RV32-NEXT: slli a0, a0, 2 -; CHECK-RV32-NEXT: add a1, a1, a0 -; CHECK-RV32-NEXT: slli a0, a0, 1 -; CHECK-RV32-NEXT: add a0, a0, a1 -; CHECK-RV32-NEXT: add a0, sp, a0 -; CHECK-RV32-NEXT: addi a0, a0, 16 -; CHECK-RV32-NEXT: vl1r.v v10, (a0) # vscale x 8-byte Folded Reload -; 
CHECK-RV32-NEXT: csrr a0, vlenb -; CHECK-RV32-NEXT: slli a0, a0, 2 -; CHECK-RV32-NEXT: mv a1, a0 -; CHECK-RV32-NEXT: slli a0, a0, 1 -; CHECK-RV32-NEXT: add a0, a0, a1 -; CHECK-RV32-NEXT: add a0, sp, a0 -; CHECK-RV32-NEXT: addi a0, a0, 16 -; CHECK-RV32-NEXT: vl1r.v v11, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-NEXT: csrr a0, vlenb -; CHECK-RV32-NEXT: mv a1, a0 -; CHECK-RV32-NEXT: slli a0, a0, 1 -; CHECK-RV32-NEXT: add a1, a1, a0 -; CHECK-RV32-NEXT: slli a0, a0, 2 -; CHECK-RV32-NEXT: add a0, a0, a1 -; CHECK-RV32-NEXT: add a0, sp, a0 -; CHECK-RV32-NEXT: addi a0, a0, 16 -; CHECK-RV32-NEXT: vl1r.v v12, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-NEXT: csrr a0, vlenb -; CHECK-RV32-NEXT: slli a0, a0, 1 -; CHECK-RV32-NEXT: mv a1, a0 -; CHECK-RV32-NEXT: slli a0, a0, 2 -; CHECK-RV32-NEXT: add a0, a0, a1 -; CHECK-RV32-NEXT: add a0, sp, a0 -; CHECK-RV32-NEXT: addi a0, a0, 16 -; CHECK-RV32-NEXT: vl1r.v v13, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-NEXT: csrr a0, vlenb -; CHECK-RV32-NEXT: slli a1, a0, 3 -; CHECK-RV32-NEXT: add a0, a1, a0 -; CHECK-RV32-NEXT: add a0, sp, a0 -; CHECK-RV32-NEXT: addi a0, a0, 16 -; CHECK-RV32-NEXT: vl1r.v v14, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-NEXT: csrr a0, vlenb ; CHECK-RV32-NEXT: slli a0, a0, 3 ; CHECK-RV32-NEXT: add a0, sp, a0 ; CHECK-RV32-NEXT: addi a0, a0, 16 -; CHECK-RV32-NEXT: vl1r.v v15, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-NEXT: csrr a0, vlenb -; CHECK-RV32-NEXT: slli a1, a0, 3 -; CHECK-RV32-NEXT: sub a0, a1, a0 -; CHECK-RV32-NEXT: add a0, sp, a0 -; CHECK-RV32-NEXT: addi a0, a0, 16 -; CHECK-RV32-NEXT: vl1r.v v16, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-NEXT: csrr a0, vlenb -; CHECK-RV32-NEXT: slli a0, a0, 1 -; CHECK-RV32-NEXT: mv a1, a0 -; CHECK-RV32-NEXT: slli a0, a0, 1 -; CHECK-RV32-NEXT: add a0, a0, a1 -; CHECK-RV32-NEXT: add a0, sp, a0 -; CHECK-RV32-NEXT: addi a0, a0, 16 -; CHECK-RV32-NEXT: vl1r.v v17, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-NEXT: csrr a0, vlenb -; CHECK-RV32-NEXT: slli a1, a0, 2 -; CHECK-RV32-NEXT: add a0, a1, a0 -; CHECK-RV32-NEXT: add a0, sp, a0 -; CHECK-RV32-NEXT: addi a0, a0, 16 -; CHECK-RV32-NEXT: vl1r.v v18, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-NEXT: csrr a0, vlenb -; CHECK-RV32-NEXT: slli a0, a0, 2 -; CHECK-RV32-NEXT: add a0, sp, a0 -; CHECK-RV32-NEXT: addi a0, a0, 16 -; CHECK-RV32-NEXT: vl1r.v v19, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-NEXT: csrr a0, vlenb -; CHECK-RV32-NEXT: slli a1, a0, 1 -; CHECK-RV32-NEXT: add a0, a1, a0 -; CHECK-RV32-NEXT: add a0, sp, a0 -; CHECK-RV32-NEXT: addi a0, a0, 16 -; CHECK-RV32-NEXT: vl1r.v v20, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-NEXT: csrr a0, vlenb -; CHECK-RV32-NEXT: slli a0, a0, 1 -; CHECK-RV32-NEXT: add a0, sp, a0 -; CHECK-RV32-NEXT: addi a0, a0, 16 -; CHECK-RV32-NEXT: vl1r.v v21, (a0) # vscale x 8-byte Folded Reload -; CHECK-RV32-NEXT: csrr a0, vlenb -; CHECK-RV32-NEXT: add a0, sp, a0 -; CHECK-RV32-NEXT: addi a0, a0, 16 -; CHECK-RV32-NEXT: vl1r.v v22, (a0) # vscale x 8-byte Folded Reload +; CHECK-RV32-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload ; CHECK-RV32-NEXT: addi a0, sp, 16 -; CHECK-RV32-NEXT: vl1r.v v23, (a0) # vscale x 8-byte Folded Reload +; CHECK-RV32-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload ; CHECK-RV32-NEXT: csrr a0, vlenb ; CHECK-RV32-NEXT: slli a0, a0, 4 ; CHECK-RV32-NEXT: add sp, sp, a0 diff --git a/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-to-vmv.mir b/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-to-vmv.mir index a050034..a7eaf39 
100644
--- a/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-to-vmv.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-to-vmv.mir
@@ -78,12 +78,12 @@ body: |
 ; CHECK-NEXT: %false:vrnov0 = COPY $v9
 ; CHECK-NEXT: %mask:vmv0 = COPY $v0
 ; CHECK-NEXT: %true:vrnov0 = PseudoVADD_VV_M1_MASK %false, $noreg, $noreg, %mask, 4, 5 /* e32 */, 0 /* tu, mu */
- ; CHECK-NEXT: %x:vr = PseudoVMV_V_V_M1 %pt, %true, 8, 5 /* e32 */, 0 /* tu, mu */
+ ; CHECK-NEXT: %x:vr = PseudoVMV_V_V_M1 %pt, %true, 4, 5 /* e32 */, 0 /* tu, mu */
 %pt:vrnov0 = COPY $v8
 %false:vrnov0 = COPY $v9
 %mask:vmv0 = COPY $v0
- %true:vrnov0 = PseudoVADD_VV_M1_MASK %false, $noreg, $noreg, %mask, 4, 5 /* e32 */, 0 /* tu, mu */
- %x:vrnov0 = PseudoVMERGE_VVM_M1 %pt, %false, %true, %mask, 8, 5 /* e32 */
+ %true:vrnov0 = PseudoVADD_VV_M1_MASK %false, $noreg, $noreg, %mask, 8, 5 /* e32 */, 0 /* tu, mu */
+ %x:vrnov0 = PseudoVMERGE_VVM_M1 %pt, %false, %true, %mask, 4, 5 /* e32 */
 ...
 ---
 # Shouldn't be converted because false operands are different
@@ -163,3 +163,47 @@ body: |
 %true:vrnov0 = PseudoVADD_VV_M1_MASK $noreg, $noreg, $noreg, %mask, 4, 5 /* e32 */, 0 /* tu, mu */
 bb.1:
 %5:vrnov0 = PseudoVMERGE_VVM_M1 $noreg, %false, %true, %mask, 4, 5 /* e32 */
+...
+---
+# Shouldn't be converted because vmerge adds back in elements from false past avl that would be lost if we converted to vmv.v.v
+name: preserve_false
+body: |
+  bb.0:
+    liveins: $v8, $v9, $v0, $x8, $x9
+    ; CHECK-LABEL: name: preserve_false
+    ; CHECK: liveins: $v8, $v9, $v0, $x8, $x9
+    ; CHECK-NEXT: {{ $}}
+    ; CHECK-NEXT: %pt:vrnov0 = COPY $v8
+    ; CHECK-NEXT: %false:vr = COPY $v9
+    ; CHECK-NEXT: %mask:vmv0 = COPY $v0
+    ; CHECK-NEXT: %avl1:gprnox0 = COPY $x8
+    ; CHECK-NEXT: %avl2:gprnox0 = COPY $x9
+    ; CHECK-NEXT: %true:vrnov0 = PseudoVADD_VV_M1_MASK $noreg, $noreg, $noreg, %mask, %avl1, 5 /* e32 */, 3 /* ta, ma */
+    ; CHECK-NEXT: [[PseudoVMERGE_VVM_M1_:%[0-9]+]]:vrnov0 = PseudoVMERGE_VVM_M1 %pt, %false, %true, %mask, %avl2, 5 /* e32 */
+    %pt:vrnov0 = COPY $v8
+    %false:vr = COPY $v9
+    %mask:vmv0 = COPY $v0
+    %avl1:gprnox0 = COPY $x8
+    %avl2:gprnox0 = COPY $x9
+    %true:vrnov0 = PseudoVADD_VV_M1_MASK $noreg, $noreg, $noreg, %mask, %avl1, 5 /* e32 */, 3 /* ta, ma */
+    %5:vrnov0 = PseudoVMERGE_VVM_M1 %pt, %false, %true, %mask, %avl2, 5 /* e32 */
+...
+---
+# But we can convert this one because vmerge's avl being <= true's means we don't lose any false elements past avl.
+name: preserve_false_avl_known_le
+body: |
+  bb.0:
+    liveins: $v8, $v9, $v0
+    ; CHECK-LABEL: name: preserve_false_avl_known_le
+    ; CHECK: liveins: $v8, $v9, $v0
+    ; CHECK-NEXT: {{ $}}
+    ; CHECK-NEXT: %pt:vr = COPY $v8
+    ; CHECK-NEXT: %false:vrnov0 = COPY $v9
+    ; CHECK-NEXT: %mask:vmv0 = COPY $v0
+    ; CHECK-NEXT: %true:vrnov0 = PseudoVADD_VV_M1_MASK %false, $noreg, $noreg, %mask, 1, 5 /* e32 */, 3 /* ta, ma */
+    ; CHECK-NEXT: [[PseudoVMV_V_V_M1_:%[0-9]+]]:vr = PseudoVMV_V_V_M1 %pt, %true, 1, 5 /* e32 */, 0 /* tu, mu */
+    %pt:vrnov0 = COPY $v8
+    %false:vr = COPY $v9
+    %mask:vmv0 = COPY $v0
+    %true:vrnov0 = PseudoVADD_VV_M1_MASK $noreg, $noreg, $noreg, %mask, 2, 5 /* e32 */, 3 /* ta, ma */
+    %5:vrnov0 = PseudoVMERGE_VVM_M1 %pt, %false, %true, %mask, 1, 5 /* e32 */
diff --git a/llvm/test/CodeGen/RISCV/rvv/rvv-vmerge-to-vmv.ll b/llvm/test/CodeGen/RISCV/rvv/rvv-vmerge-to-vmv.ll
index 3aeb4e8..9ffc84a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/rvv-vmerge-to-vmv.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/rvv-vmerge-to-vmv.ll
@@ -71,10 +71,31 @@ define <vscale x 8 x i64> @vpmerge_m8(<vscale x 8 x i64> %x, <vscale x 8 x i64>
 ret <vscale x 8 x i64> %1
 }
 
-declare <vscale x 1 x i8> @llvm.vp.merge.nxv1i8(<vscale x 1 x i1>, <vscale x 1 x i8>, <vscale x 1 x i8>, i32)
-declare <vscale x 2 x i8> @llvm.vp.merge.nxv2i8(<vscale x 2 x i1>, <vscale x 2 x i8>, <vscale x 2 x i8>, i32)
-declare <vscale x 4 x i8> @llvm.vp.merge.nxv4i8(<vscale x 4 x i1>, <vscale x 4 x i8>, <vscale x 4 x i8>, i32)
-declare <vscale x 8 x i8> @llvm.vp.merge.nxv8i8(<vscale x 8 x i1>, <vscale x 8 x i8>, <vscale x 8 x i8>, i32)
-declare <vscale x 8 x i16> @llvm.vp.merge.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>, i32)
-declare <vscale x 8 x i32> @llvm.vp.merge.nxv8i32(<vscale x 8 x i1>, <vscale x 8 x i32>, <vscale x 8 x i32>, i32)
-declare <vscale x 8 x i64> @llvm.vp.merge.nxv8i64(<vscale x 8 x i1>, <vscale x 8 x i64>, <vscale x 8 x i64>, i32)
+; Shouldn't be converted because vmerge adds back in elements from false past avl that would be lost if we converted to vmv.v.v
+define <vscale x 2 x i32> @preserve_false(ptr %p, <vscale x 2 x i32> %pt, <vscale x 2 x i32> %false, <vscale x 2 x i1> %mask, i64 %avl1, i64 %avl2) {
+; CHECK-LABEL: preserve_false:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vle32.v v10, (a0), v0.t
+; CHECK-NEXT: vsetvli zero, a2, e32, m1, tu, ma
+; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
+; CHECK-NEXT: ret
+  %true = call <vscale x 2 x i32> @llvm.riscv.vle.mask(<vscale x 2 x i32> %false, ptr %p, <vscale x 2 x i1> %mask, i64 %avl1, i64 3)
+  %res = call <vscale x 2 x i32> @llvm.riscv.vmerge(<vscale x 2 x i32> %pt, <vscale x 2 x i32> %false, <vscale x 2 x i32> %true, <vscale x 2 x i1> %mask, i64 %avl2)
+  ret <vscale x 2 x i32> %res
+}
+
+; Can fold this because its avl is known to be <= true's, so no elements from false need to be introduced past avl.
+define <vscale x 2 x i32> @preserve_false_avl_known_le(ptr %p, <vscale x 2 x i32> %pt, <vscale x 2 x i32> %false, <vscale x 2 x i1> %mask) { +; CHECK-LABEL: preserve_false_avl_known_le: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma +; CHECK-NEXT: vle32.v v9, (a0), v0.t +; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, ma +; CHECK-NEXT: vmv.v.v v8, v9 +; CHECK-NEXT: ret + %true = call <vscale x 2 x i32> @llvm.riscv.vle.mask(<vscale x 2 x i32> %false, ptr %p, <vscale x 2 x i1> %mask, i64 2, i64 3) + %res = call <vscale x 2 x i32> @llvm.riscv.vmerge(<vscale x 2 x i32> %pt, <vscale x 2 x i32> %false, <vscale x 2 x i32> %true, <vscale x 2 x i1> %mask, i64 1) + ret <vscale x 2 x i32> %res +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-load.ll b/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-load.ll index 96a7b14..f9f0aa6 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-load.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-load.ll @@ -550,8 +550,8 @@ define {<vscale x 16 x i8>, <vscale x 16 x i8>} @masked_load_factor2(ptr %p) { ret {<vscale x 16 x i8>, <vscale x 16 x i8>} %deinterleaved.results } -define {<vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>} @masked_loat_factor4(ptr %p) { -; CHECK-LABEL: masked_loat_factor4: +define {<vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>} @masked_load_factor4(ptr %p) { +; CHECK-LABEL: masked_load_factor4: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; CHECK-NEXT: vlseg4e8.v v8, (a0) @@ -561,8 +561,8 @@ define {<vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i ret {<vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>} %deinterleaved.results } -define {<vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>} @masked_loat_factor4_mask(ptr %p, <vscale x 8 x i1> %mask) { -; CHECK-LABEL: masked_loat_factor4_mask: +define {<vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>} @masked_load_factor4_mask(ptr %p, <vscale x 8 x i1> %mask) { +; CHECK-LABEL: masked_load_factor4_mask: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; CHECK-NEXT: vlseg4e8.v v8, (a0), v0.t @@ -575,8 +575,8 @@ define {<vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i ; Negative test - some of the deinterleaved elements might come from the ; passthru not the load -define {<vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>} @masked_loat_factor4_passthru(ptr %p, <vscale x 8 x i1> %mask, <vscale x 32 x i8> %passthru) { -; CHECK-LABEL: masked_loat_factor4_passthru: +define {<vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>} @masked_load_factor4_passthru(ptr %p, <vscale x 8 x i1> %mask, <vscale x 32 x i8> %passthru) { +; CHECK-LABEL: masked_load_factor4_passthru: ; CHECK: # %bb.0: ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: .cfi_def_cfa_offset 16 diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-interleave-store.ll b/llvm/test/CodeGen/RISCV/rvv/vector-interleave-store.ll index af55aaa..7e7d11e 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vector-interleave-store.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vector-interleave-store.ll @@ -303,3 +303,26 @@ define void @vector_interleave_store_factor8(<vscale x 2 x i32> %a, <vscale x 2 store <vscale x 16 x i32> %v, ptr %p ret void } + +define void @masked_store_factor3(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b, <vscale x 2 x i32> %c, ptr %p) { +; CHECK-LABEL: 
masked_store_factor3: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma +; CHECK-NEXT: vsseg3e32.v v8, (a0) +; CHECK-NEXT: ret + %v = call <vscale x 6 x i32> @llvm.vector.interleave3(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b, <vscale x 2 x i32> %c) + call void @llvm.masked.store(<vscale x 6 x i32> %v, ptr %p, i32 4, <vscale x 6 x i1> splat (i1 true)) + ret void +} + +define void @masked_store_factor3_masked(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b, <vscale x 2 x i32> %c, ptr %p, <vscale x 2 x i1> %m) { +; CHECK-LABEL: masked_store_factor3_masked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma +; CHECK-NEXT: vsseg3e32.v v8, (a0), v0.t +; CHECK-NEXT: ret + %interleaved.mask = call <vscale x 6 x i1> @llvm.vector.interleave3(<vscale x 2 x i1> %m, <vscale x 2 x i1> %m, <vscale x 2 x i1> %m) + %v = call <vscale x 6 x i32> @llvm.vector.interleave3(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b, <vscale x 2 x i32> %c) + call void @llvm.masked.store(<vscale x 6 x i32> %v, ptr %p, i32 4, <vscale x 6 x i1> %interleaved.mask) + ret void +} diff --git a/llvm/test/CodeGen/RISCV/wide-scalar-shift-legalization.ll b/llvm/test/CodeGen/RISCV/wide-scalar-shift-legalization.ll index 32753ca..cd7f30d 100644 --- a/llvm/test/CodeGen/RISCV/wide-scalar-shift-legalization.ll +++ b/llvm/test/CodeGen/RISCV/wide-scalar-shift-legalization.ll @@ -716,92 +716,101 @@ define void @lshr_16bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind { ; RV32I-NEXT: slli a4, a4, 8 ; RV32I-NEXT: slli a5, a5, 16 ; RV32I-NEXT: slli a6, a6, 24 +; RV32I-NEXT: or a3, a4, a3 +; RV32I-NEXT: or a4, a6, a5 +; RV32I-NEXT: lbu a5, 8(a0) +; RV32I-NEXT: lbu a6, 9(a0) +; RV32I-NEXT: lbu t3, 10(a0) +; RV32I-NEXT: lbu t4, 11(a0) ; RV32I-NEXT: slli t0, t0, 8 -; RV32I-NEXT: or a4, a4, a3 -; RV32I-NEXT: or a5, a6, a5 -; RV32I-NEXT: or a3, t0, a7 -; RV32I-NEXT: lbu a6, 8(a0) -; RV32I-NEXT: lbu a7, 9(a0) -; RV32I-NEXT: lbu t0, 10(a0) -; RV32I-NEXT: lbu t3, 11(a0) ; RV32I-NEXT: slli t1, t1, 16 ; RV32I-NEXT: slli t2, t2, 24 -; RV32I-NEXT: slli a7, a7, 8 -; RV32I-NEXT: slli t0, t0, 16 -; RV32I-NEXT: slli t3, t3, 24 -; RV32I-NEXT: or t1, t2, t1 -; RV32I-NEXT: or a6, a7, a6 -; RV32I-NEXT: or a7, t3, t0 -; RV32I-NEXT: lbu t0, 12(a0) -; RV32I-NEXT: lbu t2, 13(a0) -; RV32I-NEXT: lbu t3, 14(a0) -; RV32I-NEXT: lbu t4, 15(a0) -; RV32I-NEXT: lbu a0, 0(a1) +; RV32I-NEXT: slli a6, a6, 8 +; RV32I-NEXT: or a7, t0, a7 +; RV32I-NEXT: or t0, t2, t1 +; RV32I-NEXT: or a5, a6, a5 +; RV32I-NEXT: lbu a6, 12(a0) +; RV32I-NEXT: lbu t1, 13(a0) +; RV32I-NEXT: lbu t2, 14(a0) +; RV32I-NEXT: lbu a0, 15(a0) +; RV32I-NEXT: slli t3, t3, 16 +; RV32I-NEXT: slli t4, t4, 24 +; RV32I-NEXT: slli t1, t1, 8 +; RV32I-NEXT: slli t2, t2, 16 +; RV32I-NEXT: slli a0, a0, 24 +; RV32I-NEXT: or t3, t4, t3 +; RV32I-NEXT: or a6, t1, a6 +; RV32I-NEXT: or a0, a0, t2 +; RV32I-NEXT: lbu t1, 1(a1) +; RV32I-NEXT: lbu t2, 0(a1) +; RV32I-NEXT: lbu t4, 2(a1) +; RV32I-NEXT: lbu a1, 3(a1) +; RV32I-NEXT: slli t1, t1, 8 +; RV32I-NEXT: or t1, t1, t2 ; RV32I-NEXT: sw zero, 16(sp) ; RV32I-NEXT: sw zero, 20(sp) ; RV32I-NEXT: sw zero, 24(sp) ; RV32I-NEXT: sw zero, 28(sp) -; RV32I-NEXT: slli t2, t2, 8 -; RV32I-NEXT: or a1, t2, t0 -; RV32I-NEXT: mv t0, sp -; RV32I-NEXT: slli t3, t3, 16 -; RV32I-NEXT: slli t4, t4, 24 -; RV32I-NEXT: or t2, t4, t3 -; RV32I-NEXT: srli t3, a0, 3 -; RV32I-NEXT: or a4, a5, a4 -; RV32I-NEXT: andi a5, a0, 31 -; RV32I-NEXT: andi t3, t3, 12 -; RV32I-NEXT: xori a5, a5, 31 -; RV32I-NEXT: or a3, t1, a3 -; RV32I-NEXT: or a6, a7, a6 -; RV32I-NEXT: or a1, t2, a1 -; 
RV32I-NEXT: add t0, t0, t3 -; RV32I-NEXT: sw a4, 0(sp) -; RV32I-NEXT: sw a3, 4(sp) -; RV32I-NEXT: sw a6, 8(sp) -; RV32I-NEXT: sw a1, 12(sp) -; RV32I-NEXT: lw a1, 4(t0) -; RV32I-NEXT: lw a3, 8(t0) -; RV32I-NEXT: lw a4, 0(t0) -; RV32I-NEXT: lw a6, 12(t0) -; RV32I-NEXT: srl a7, a1, a0 -; RV32I-NEXT: slli t0, a3, 1 -; RV32I-NEXT: srl a4, a4, a0 -; RV32I-NEXT: slli a1, a1, 1 -; RV32I-NEXT: srl a3, a3, a0 -; RV32I-NEXT: slli t1, a6, 1 -; RV32I-NEXT: srl a0, a6, a0 -; RV32I-NEXT: sll a6, t0, a5 -; RV32I-NEXT: sll a1, a1, a5 -; RV32I-NEXT: sll a5, t1, a5 +; RV32I-NEXT: slli t4, t4, 16 +; RV32I-NEXT: slli a1, a1, 24 +; RV32I-NEXT: or a1, a1, t4 +; RV32I-NEXT: mv t2, sp +; RV32I-NEXT: or a3, a4, a3 +; RV32I-NEXT: or a4, t0, a7 +; RV32I-NEXT: or a5, t3, a5 +; RV32I-NEXT: or a0, a0, a6 +; RV32I-NEXT: or a1, a1, t1 +; RV32I-NEXT: sw a3, 0(sp) +; RV32I-NEXT: sw a4, 4(sp) +; RV32I-NEXT: sw a5, 8(sp) +; RV32I-NEXT: sw a0, 12(sp) +; RV32I-NEXT: srli a0, a1, 3 +; RV32I-NEXT: andi a3, a1, 31 +; RV32I-NEXT: andi a0, a0, 12 +; RV32I-NEXT: xori a3, a3, 31 +; RV32I-NEXT: add a0, t2, a0 +; RV32I-NEXT: lw a4, 4(a0) +; RV32I-NEXT: lw a5, 8(a0) +; RV32I-NEXT: lw a6, 0(a0) +; RV32I-NEXT: lw a0, 12(a0) +; RV32I-NEXT: srl a7, a4, a1 +; RV32I-NEXT: slli t0, a5, 1 +; RV32I-NEXT: srl a6, a6, a1 +; RV32I-NEXT: slli a4, a4, 1 +; RV32I-NEXT: srl a5, a5, a1 +; RV32I-NEXT: slli t1, a0, 1 +; RV32I-NEXT: srl a0, a0, a1 +; RV32I-NEXT: sll a1, t0, a3 +; RV32I-NEXT: sll a4, a4, a3 +; RV32I-NEXT: sll a3, t1, a3 ; RV32I-NEXT: srli t0, a0, 16 ; RV32I-NEXT: srli t1, a0, 24 ; RV32I-NEXT: srli t2, a0, 8 -; RV32I-NEXT: or a6, a7, a6 -; RV32I-NEXT: or a1, a4, a1 -; RV32I-NEXT: or a3, a3, a5 +; RV32I-NEXT: or a1, a7, a1 +; RV32I-NEXT: or a4, a6, a4 +; RV32I-NEXT: or a3, a5, a3 ; RV32I-NEXT: sb a0, 12(a2) ; RV32I-NEXT: sb t2, 13(a2) ; RV32I-NEXT: sb t0, 14(a2) ; RV32I-NEXT: sb t1, 15(a2) ; RV32I-NEXT: srli a0, a3, 16 -; RV32I-NEXT: srli a4, a3, 24 -; RV32I-NEXT: srli a5, a3, 8 -; RV32I-NEXT: srli a7, a1, 16 -; RV32I-NEXT: srli t0, a1, 24 -; RV32I-NEXT: srli t1, a1, 8 -; RV32I-NEXT: srli t2, a6, 16 -; RV32I-NEXT: srli t3, a6, 24 +; RV32I-NEXT: srli a5, a3, 24 +; RV32I-NEXT: srli a6, a3, 8 +; RV32I-NEXT: srli a7, a4, 16 +; RV32I-NEXT: srli t0, a4, 24 +; RV32I-NEXT: srli t1, a4, 8 +; RV32I-NEXT: srli t2, a1, 16 +; RV32I-NEXT: srli t3, a1, 24 ; RV32I-NEXT: sb a3, 8(a2) -; RV32I-NEXT: sb a5, 9(a2) +; RV32I-NEXT: sb a6, 9(a2) ; RV32I-NEXT: sb a0, 10(a2) -; RV32I-NEXT: sb a4, 11(a2) -; RV32I-NEXT: srli a0, a6, 8 -; RV32I-NEXT: sb a1, 0(a2) +; RV32I-NEXT: sb a5, 11(a2) +; RV32I-NEXT: srli a0, a1, 8 +; RV32I-NEXT: sb a4, 0(a2) ; RV32I-NEXT: sb t1, 1(a2) ; RV32I-NEXT: sb a7, 2(a2) ; RV32I-NEXT: sb t0, 3(a2) -; RV32I-NEXT: sb a6, 4(a2) +; RV32I-NEXT: sb a1, 4(a2) ; RV32I-NEXT: sb a0, 5(a2) ; RV32I-NEXT: sb t2, 6(a2) ; RV32I-NEXT: sb t3, 7(a2) @@ -943,93 +952,102 @@ define void @shl_16bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind { ; RV32I-NEXT: slli a4, a4, 8 ; RV32I-NEXT: slli a5, a5, 16 ; RV32I-NEXT: slli a6, a6, 24 +; RV32I-NEXT: or a3, a4, a3 +; RV32I-NEXT: or a4, a6, a5 +; RV32I-NEXT: lbu a5, 8(a0) +; RV32I-NEXT: lbu a6, 9(a0) +; RV32I-NEXT: lbu t3, 10(a0) +; RV32I-NEXT: lbu t4, 11(a0) ; RV32I-NEXT: slli t0, t0, 8 -; RV32I-NEXT: or a4, a4, a3 -; RV32I-NEXT: or a5, a6, a5 -; RV32I-NEXT: or a3, t0, a7 -; RV32I-NEXT: lbu a6, 8(a0) -; RV32I-NEXT: lbu a7, 9(a0) -; RV32I-NEXT: lbu t0, 10(a0) -; RV32I-NEXT: lbu t3, 11(a0) ; RV32I-NEXT: slli t1, t1, 16 ; RV32I-NEXT: slli t2, t2, 24 -; RV32I-NEXT: slli a7, a7, 8 -; RV32I-NEXT: slli t0, t0, 16 -; 
RV32I-NEXT: slli t3, t3, 24 -; RV32I-NEXT: or t1, t2, t1 -; RV32I-NEXT: or a6, a7, a6 -; RV32I-NEXT: or a7, t3, t0 -; RV32I-NEXT: lbu t0, 12(a0) -; RV32I-NEXT: lbu t2, 13(a0) -; RV32I-NEXT: lbu t3, 14(a0) -; RV32I-NEXT: lbu t4, 15(a0) -; RV32I-NEXT: lbu a0, 0(a1) +; RV32I-NEXT: slli a6, a6, 8 +; RV32I-NEXT: or a7, t0, a7 +; RV32I-NEXT: or t0, t2, t1 +; RV32I-NEXT: or a5, a6, a5 +; RV32I-NEXT: lbu a6, 12(a0) +; RV32I-NEXT: lbu t1, 13(a0) +; RV32I-NEXT: lbu t2, 14(a0) +; RV32I-NEXT: lbu a0, 15(a0) +; RV32I-NEXT: slli t3, t3, 16 +; RV32I-NEXT: slli t4, t4, 24 +; RV32I-NEXT: slli t1, t1, 8 +; RV32I-NEXT: slli t2, t2, 16 +; RV32I-NEXT: slli a0, a0, 24 +; RV32I-NEXT: or t3, t4, t3 +; RV32I-NEXT: or a6, t1, a6 +; RV32I-NEXT: or a0, a0, t2 +; RV32I-NEXT: lbu t1, 1(a1) +; RV32I-NEXT: lbu t2, 0(a1) +; RV32I-NEXT: lbu t4, 2(a1) +; RV32I-NEXT: lbu a1, 3(a1) +; RV32I-NEXT: slli t1, t1, 8 +; RV32I-NEXT: or t1, t1, t2 ; RV32I-NEXT: sw zero, 0(sp) ; RV32I-NEXT: sw zero, 4(sp) ; RV32I-NEXT: sw zero, 8(sp) ; RV32I-NEXT: sw zero, 12(sp) -; RV32I-NEXT: slli t2, t2, 8 -; RV32I-NEXT: or a1, t2, t0 -; RV32I-NEXT: addi t0, sp, 16 -; RV32I-NEXT: slli t3, t3, 16 -; RV32I-NEXT: slli t4, t4, 24 -; RV32I-NEXT: or t2, t4, t3 -; RV32I-NEXT: srli t3, a0, 3 -; RV32I-NEXT: or a4, a5, a4 -; RV32I-NEXT: andi a5, a0, 31 -; RV32I-NEXT: andi t3, t3, 12 -; RV32I-NEXT: or a3, t1, a3 -; RV32I-NEXT: or a6, a7, a6 -; RV32I-NEXT: or a1, t2, a1 -; RV32I-NEXT: sub a7, t0, t3 -; RV32I-NEXT: sw a4, 16(sp) -; RV32I-NEXT: sw a3, 20(sp) -; RV32I-NEXT: sw a6, 24(sp) -; RV32I-NEXT: sw a1, 28(sp) -; RV32I-NEXT: lw a1, 0(a7) -; RV32I-NEXT: lw a3, 4(a7) -; RV32I-NEXT: lw a4, 8(a7) -; RV32I-NEXT: lw a6, 12(a7) -; RV32I-NEXT: xori a5, a5, 31 -; RV32I-NEXT: sll a7, a3, a0 -; RV32I-NEXT: srli t0, a1, 1 -; RV32I-NEXT: sll a6, a6, a0 -; RV32I-NEXT: srli t1, a4, 1 -; RV32I-NEXT: sll a4, a4, a0 -; RV32I-NEXT: srli a3, a3, 1 -; RV32I-NEXT: sll a0, a1, a0 -; RV32I-NEXT: srl a1, t0, a5 -; RV32I-NEXT: srl t0, t1, a5 -; RV32I-NEXT: srl a3, a3, a5 -; RV32I-NEXT: srli a5, a0, 16 -; RV32I-NEXT: srli t1, a0, 24 -; RV32I-NEXT: srli t2, a0, 8 -; RV32I-NEXT: or a1, a7, a1 -; RV32I-NEXT: or a6, a6, t0 +; RV32I-NEXT: slli t4, t4, 16 +; RV32I-NEXT: slli a1, a1, 24 +; RV32I-NEXT: or a1, a1, t4 +; RV32I-NEXT: addi t2, sp, 16 ; RV32I-NEXT: or a3, a4, a3 -; RV32I-NEXT: sb a0, 0(a2) +; RV32I-NEXT: or a4, t0, a7 +; RV32I-NEXT: or a5, t3, a5 +; RV32I-NEXT: or a0, a0, a6 +; RV32I-NEXT: or a1, a1, t1 +; RV32I-NEXT: sw a3, 16(sp) +; RV32I-NEXT: sw a4, 20(sp) +; RV32I-NEXT: sw a5, 24(sp) +; RV32I-NEXT: sw a0, 28(sp) +; RV32I-NEXT: srli a0, a1, 3 +; RV32I-NEXT: andi a3, a1, 31 +; RV32I-NEXT: andi a0, a0, 12 +; RV32I-NEXT: sub a0, t2, a0 +; RV32I-NEXT: lw a4, 0(a0) +; RV32I-NEXT: lw a5, 4(a0) +; RV32I-NEXT: lw a6, 8(a0) +; RV32I-NEXT: lw a0, 12(a0) +; RV32I-NEXT: xori a3, a3, 31 +; RV32I-NEXT: sll a7, a5, a1 +; RV32I-NEXT: srli t0, a4, 1 +; RV32I-NEXT: sll a0, a0, a1 +; RV32I-NEXT: srli t1, a6, 1 +; RV32I-NEXT: sll a6, a6, a1 +; RV32I-NEXT: srli a5, a5, 1 +; RV32I-NEXT: sll a1, a4, a1 +; RV32I-NEXT: srl a4, t0, a3 +; RV32I-NEXT: srl t0, t1, a3 +; RV32I-NEXT: srl a3, a5, a3 +; RV32I-NEXT: srli a5, a1, 16 +; RV32I-NEXT: srli t1, a1, 24 +; RV32I-NEXT: srli t2, a1, 8 +; RV32I-NEXT: or a4, a7, a4 +; RV32I-NEXT: or a0, a0, t0 +; RV32I-NEXT: or a3, a6, a3 +; RV32I-NEXT: sb a1, 0(a2) ; RV32I-NEXT: sb t2, 1(a2) ; RV32I-NEXT: sb a5, 2(a2) ; RV32I-NEXT: sb t1, 3(a2) -; RV32I-NEXT: srli a0, a3, 16 -; RV32I-NEXT: srli a4, a3, 24 -; RV32I-NEXT: srli a5, a3, 8 -; RV32I-NEXT: srli a7, a6, 16 -; 
RV32I-NEXT: srli t0, a6, 24 -; RV32I-NEXT: srli t1, a6, 8 -; RV32I-NEXT: srli t2, a1, 16 -; RV32I-NEXT: srli t3, a1, 24 +; RV32I-NEXT: srli a1, a3, 16 +; RV32I-NEXT: srli a5, a3, 24 +; RV32I-NEXT: srli a6, a3, 8 +; RV32I-NEXT: srli a7, a0, 16 +; RV32I-NEXT: srli t0, a0, 24 +; RV32I-NEXT: srli t1, a0, 8 +; RV32I-NEXT: srli t2, a4, 16 +; RV32I-NEXT: srli t3, a4, 24 ; RV32I-NEXT: sb a3, 8(a2) -; RV32I-NEXT: sb a5, 9(a2) -; RV32I-NEXT: sb a0, 10(a2) -; RV32I-NEXT: sb a4, 11(a2) -; RV32I-NEXT: srli a0, a1, 8 -; RV32I-NEXT: sb a6, 12(a2) +; RV32I-NEXT: sb a6, 9(a2) +; RV32I-NEXT: sb a1, 10(a2) +; RV32I-NEXT: sb a5, 11(a2) +; RV32I-NEXT: srli a1, a4, 8 +; RV32I-NEXT: sb a0, 12(a2) ; RV32I-NEXT: sb t1, 13(a2) ; RV32I-NEXT: sb a7, 14(a2) ; RV32I-NEXT: sb t0, 15(a2) -; RV32I-NEXT: sb a1, 4(a2) -; RV32I-NEXT: sb a0, 5(a2) +; RV32I-NEXT: sb a4, 4(a2) +; RV32I-NEXT: sb a1, 5(a2) ; RV32I-NEXT: sb t2, 6(a2) ; RV32I-NEXT: sb t3, 7(a2) ; RV32I-NEXT: addi sp, sp, 32 @@ -1168,73 +1186,82 @@ define void @ashr_16bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind { ; RV32I-NEXT: lbu t1, 6(a0) ; RV32I-NEXT: lbu t2, 7(a0) ; RV32I-NEXT: slli a4, a4, 8 +; RV32I-NEXT: or a3, a4, a3 +; RV32I-NEXT: lbu a4, 8(a0) +; RV32I-NEXT: lbu t3, 9(a0) +; RV32I-NEXT: lbu t4, 10(a0) +; RV32I-NEXT: lbu t5, 11(a0) ; RV32I-NEXT: slli a5, a5, 16 ; RV32I-NEXT: slli a6, a6, 24 ; RV32I-NEXT: slli t0, t0, 8 -; RV32I-NEXT: or a3, a4, a3 -; RV32I-NEXT: or a4, a6, a5 -; RV32I-NEXT: or a5, t0, a7 -; RV32I-NEXT: lbu a6, 8(a0) -; RV32I-NEXT: lbu a7, 9(a0) -; RV32I-NEXT: lbu t0, 10(a0) -; RV32I-NEXT: lbu t3, 11(a0) ; RV32I-NEXT: slli t1, t1, 16 ; RV32I-NEXT: slli t2, t2, 24 -; RV32I-NEXT: slli a7, a7, 8 -; RV32I-NEXT: slli t0, t0, 16 -; RV32I-NEXT: slli t3, t3, 24 -; RV32I-NEXT: or t1, t2, t1 -; RV32I-NEXT: or a6, a7, a6 -; RV32I-NEXT: or a7, t3, t0 +; RV32I-NEXT: or a5, a6, a5 +; RV32I-NEXT: or a6, t0, a7 +; RV32I-NEXT: or a7, t2, t1 ; RV32I-NEXT: lbu t0, 12(a0) -; RV32I-NEXT: lbu t2, 13(a0) -; RV32I-NEXT: lbu t3, 14(a0) -; RV32I-NEXT: lbu t4, 15(a0) -; RV32I-NEXT: lbu a0, 0(a1) -; RV32I-NEXT: slli t2, t2, 8 -; RV32I-NEXT: or a1, t2, t0 -; RV32I-NEXT: mv t0, sp -; RV32I-NEXT: slli t3, t3, 16 -; RV32I-NEXT: slli t4, t4, 24 -; RV32I-NEXT: or a3, a4, a3 -; RV32I-NEXT: srli a4, a0, 3 -; RV32I-NEXT: or a5, t1, a5 -; RV32I-NEXT: andi t1, a0, 31 -; RV32I-NEXT: or t2, t4, t3 -; RV32I-NEXT: srai t3, t4, 31 -; RV32I-NEXT: andi a4, a4, 12 -; RV32I-NEXT: xori t1, t1, 31 +; RV32I-NEXT: lbu t1, 13(a0) +; RV32I-NEXT: lbu t2, 14(a0) +; RV32I-NEXT: lbu a0, 15(a0) +; RV32I-NEXT: slli t3, t3, 8 +; RV32I-NEXT: slli t4, t4, 16 +; RV32I-NEXT: slli t5, t5, 24 +; RV32I-NEXT: slli t1, t1, 8 +; RV32I-NEXT: or a4, t3, a4 +; RV32I-NEXT: or t3, t5, t4 +; RV32I-NEXT: or t0, t1, t0 +; RV32I-NEXT: lbu t1, 1(a1) +; RV32I-NEXT: lbu t4, 0(a1) +; RV32I-NEXT: lbu t5, 2(a1) +; RV32I-NEXT: lbu a1, 3(a1) +; RV32I-NEXT: slli t1, t1, 8 +; RV32I-NEXT: or t1, t1, t4 +; RV32I-NEXT: slli t5, t5, 16 +; RV32I-NEXT: slli a1, a1, 24 +; RV32I-NEXT: or a1, a1, t5 +; RV32I-NEXT: or a3, a5, a3 +; RV32I-NEXT: mv a5, sp +; RV32I-NEXT: slli t2, t2, 16 +; RV32I-NEXT: slli a0, a0, 24 +; RV32I-NEXT: or t2, a0, t2 +; RV32I-NEXT: srai a0, a0, 31 ; RV32I-NEXT: or a6, a7, a6 -; RV32I-NEXT: or a1, t2, a1 -; RV32I-NEXT: sw t3, 16(sp) -; RV32I-NEXT: sw t3, 20(sp) -; RV32I-NEXT: sw t3, 24(sp) -; RV32I-NEXT: sw t3, 28(sp) -; RV32I-NEXT: add a4, t0, a4 +; RV32I-NEXT: or a4, t3, a4 +; RV32I-NEXT: or a7, t2, t0 +; RV32I-NEXT: or a1, a1, t1 +; RV32I-NEXT: sw a0, 16(sp) +; RV32I-NEXT: sw a0, 20(sp) +; 
RV32I-NEXT: sw a0, 24(sp) +; RV32I-NEXT: sw a0, 28(sp) ; RV32I-NEXT: sw a3, 0(sp) -; RV32I-NEXT: sw a5, 4(sp) -; RV32I-NEXT: sw a6, 8(sp) -; RV32I-NEXT: sw a1, 12(sp) -; RV32I-NEXT: lw a1, 4(a4) -; RV32I-NEXT: lw a3, 8(a4) -; RV32I-NEXT: lw a5, 0(a4) -; RV32I-NEXT: lw a4, 12(a4) -; RV32I-NEXT: srl a6, a1, a0 -; RV32I-NEXT: slli a7, a3, 1 -; RV32I-NEXT: srl a5, a5, a0 -; RV32I-NEXT: slli a1, a1, 1 -; RV32I-NEXT: srl a3, a3, a0 -; RV32I-NEXT: slli t0, a4, 1 -; RV32I-NEXT: sra a0, a4, a0 -; RV32I-NEXT: sll a4, a7, t1 -; RV32I-NEXT: sll a1, a1, t1 -; RV32I-NEXT: sll a7, t0, t1 +; RV32I-NEXT: sw a6, 4(sp) +; RV32I-NEXT: sw a4, 8(sp) +; RV32I-NEXT: sw a7, 12(sp) +; RV32I-NEXT: srli a0, a1, 3 +; RV32I-NEXT: andi a3, a1, 31 +; RV32I-NEXT: andi a0, a0, 12 +; RV32I-NEXT: xori a3, a3, 31 +; RV32I-NEXT: add a0, a5, a0 +; RV32I-NEXT: lw a4, 4(a0) +; RV32I-NEXT: lw a5, 8(a0) +; RV32I-NEXT: lw a6, 0(a0) +; RV32I-NEXT: lw a0, 12(a0) +; RV32I-NEXT: srl a7, a4, a1 +; RV32I-NEXT: slli t0, a5, 1 +; RV32I-NEXT: srl a6, a6, a1 +; RV32I-NEXT: slli a4, a4, 1 +; RV32I-NEXT: srl a5, a5, a1 +; RV32I-NEXT: slli t1, a0, 1 +; RV32I-NEXT: sra a0, a0, a1 +; RV32I-NEXT: sll a1, t0, a3 +; RV32I-NEXT: sll a4, a4, a3 +; RV32I-NEXT: sll a3, t1, a3 ; RV32I-NEXT: srli t0, a0, 16 ; RV32I-NEXT: srli t1, a0, 24 ; RV32I-NEXT: srli t2, a0, 8 +; RV32I-NEXT: or a1, a7, a1 ; RV32I-NEXT: or a4, a6, a4 -; RV32I-NEXT: or a1, a5, a1 -; RV32I-NEXT: or a3, a3, a7 +; RV32I-NEXT: or a3, a5, a3 ; RV32I-NEXT: sb a0, 12(a2) ; RV32I-NEXT: sb t2, 13(a2) ; RV32I-NEXT: sb t0, 14(a2) @@ -1242,21 +1269,21 @@ define void @ashr_16bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind { ; RV32I-NEXT: srli a0, a3, 16 ; RV32I-NEXT: srli a5, a3, 24 ; RV32I-NEXT: srli a6, a3, 8 -; RV32I-NEXT: srli a7, a1, 16 -; RV32I-NEXT: srli t0, a1, 24 -; RV32I-NEXT: srli t1, a1, 8 -; RV32I-NEXT: srli t2, a4, 16 -; RV32I-NEXT: srli t3, a4, 24 +; RV32I-NEXT: srli a7, a4, 16 +; RV32I-NEXT: srli t0, a4, 24 +; RV32I-NEXT: srli t1, a4, 8 +; RV32I-NEXT: srli t2, a1, 16 +; RV32I-NEXT: srli t3, a1, 24 ; RV32I-NEXT: sb a3, 8(a2) ; RV32I-NEXT: sb a6, 9(a2) ; RV32I-NEXT: sb a0, 10(a2) ; RV32I-NEXT: sb a5, 11(a2) -; RV32I-NEXT: srli a0, a4, 8 -; RV32I-NEXT: sb a1, 0(a2) +; RV32I-NEXT: srli a0, a1, 8 +; RV32I-NEXT: sb a4, 0(a2) ; RV32I-NEXT: sb t1, 1(a2) ; RV32I-NEXT: sb a7, 2(a2) ; RV32I-NEXT: sb t0, 3(a2) -; RV32I-NEXT: sb a4, 4(a2) +; RV32I-NEXT: sb a1, 4(a2) ; RV32I-NEXT: sb a0, 5(a2) ; RV32I-NEXT: sb t2, 6(a2) ; RV32I-NEXT: sb t3, 7(a2) @@ -1272,17 +1299,19 @@ define void @ashr_16bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind { define void @lshr_32bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind { ; RV64I-LABEL: lshr_32bytes: ; RV64I: # %bb.0: -; RV64I-NEXT: addi sp, sp, -144 -; RV64I-NEXT: sd s0, 136(sp) # 8-byte Folded Spill -; RV64I-NEXT: sd s1, 128(sp) # 8-byte Folded Spill -; RV64I-NEXT: sd s2, 120(sp) # 8-byte Folded Spill -; RV64I-NEXT: sd s3, 112(sp) # 8-byte Folded Spill -; RV64I-NEXT: sd s4, 104(sp) # 8-byte Folded Spill -; RV64I-NEXT: sd s5, 96(sp) # 8-byte Folded Spill -; RV64I-NEXT: sd s6, 88(sp) # 8-byte Folded Spill -; RV64I-NEXT: sd s7, 80(sp) # 8-byte Folded Spill -; RV64I-NEXT: sd s8, 72(sp) # 8-byte Folded Spill -; RV64I-NEXT: sd s9, 64(sp) # 8-byte Folded Spill +; RV64I-NEXT: addi sp, sp, -160 +; RV64I-NEXT: sd s0, 152(sp) # 8-byte Folded Spill +; RV64I-NEXT: sd s1, 144(sp) # 8-byte Folded Spill +; RV64I-NEXT: sd s2, 136(sp) # 8-byte Folded Spill +; RV64I-NEXT: sd s3, 128(sp) # 8-byte Folded Spill +; RV64I-NEXT: sd s4, 120(sp) # 8-byte Folded 
Spill +; RV64I-NEXT: sd s5, 112(sp) # 8-byte Folded Spill +; RV64I-NEXT: sd s6, 104(sp) # 8-byte Folded Spill +; RV64I-NEXT: sd s7, 96(sp) # 8-byte Folded Spill +; RV64I-NEXT: sd s8, 88(sp) # 8-byte Folded Spill +; RV64I-NEXT: sd s9, 80(sp) # 8-byte Folded Spill +; RV64I-NEXT: sd s10, 72(sp) # 8-byte Folded Spill +; RV64I-NEXT: sd s11, 64(sp) # 8-byte Folded Spill ; RV64I-NEXT: lbu a3, 0(a0) ; RV64I-NEXT: lbu a4, 1(a0) ; RV64I-NEXT: lbu a5, 2(a0) @@ -1299,122 +1328,143 @@ define void @lshr_32bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind { ; RV64I-NEXT: lbu s1, 13(a0) ; RV64I-NEXT: lbu s2, 14(a0) ; RV64I-NEXT: lbu s3, 15(a0) -; RV64I-NEXT: slli a4, a4, 8 -; RV64I-NEXT: slli a5, a5, 16 -; RV64I-NEXT: slli a6, a6, 24 -; RV64I-NEXT: or a3, a4, a3 -; RV64I-NEXT: or a4, a6, a5 ; RV64I-NEXT: lbu s4, 16(a0) ; RV64I-NEXT: lbu s5, 17(a0) ; RV64I-NEXT: lbu s6, 18(a0) ; RV64I-NEXT: lbu s7, 19(a0) +; RV64I-NEXT: slli a4, a4, 8 +; RV64I-NEXT: slli s8, a5, 16 +; RV64I-NEXT: slli a6, a6, 24 ; RV64I-NEXT: slli t0, t0, 8 ; RV64I-NEXT: slli t1, t1, 16 ; RV64I-NEXT: slli t2, t2, 24 +; RV64I-NEXT: or a5, a4, a3 +; RV64I-NEXT: or a6, a6, s8 +; RV64I-NEXT: or a3, t0, a7 +; RV64I-NEXT: or a4, t2, t1 +; RV64I-NEXT: lbu s8, 20(a0) +; RV64I-NEXT: lbu s9, 21(a0) +; RV64I-NEXT: lbu s10, 22(a0) +; RV64I-NEXT: lbu s11, 23(a0) ; RV64I-NEXT: slli t4, t4, 8 ; RV64I-NEXT: slli t5, t5, 16 ; RV64I-NEXT: slli t6, t6, 24 -; RV64I-NEXT: or a5, t0, a7 -; RV64I-NEXT: or a6, t2, t1 -; RV64I-NEXT: or a7, t4, t3 -; RV64I-NEXT: or t0, t6, t5 -; RV64I-NEXT: lbu t5, 20(a0) -; RV64I-NEXT: lbu t6, 21(a0) -; RV64I-NEXT: lbu s8, 22(a0) -; RV64I-NEXT: lbu s9, 23(a0) ; RV64I-NEXT: slli s1, s1, 8 ; RV64I-NEXT: slli s2, s2, 16 ; RV64I-NEXT: slli s3, s3, 24 +; RV64I-NEXT: or a7, t4, t3 +; RV64I-NEXT: or t0, t6, t5 +; RV64I-NEXT: or t1, s1, s0 +; RV64I-NEXT: or t2, s3, s2 +; RV64I-NEXT: lbu t6, 24(a0) +; RV64I-NEXT: lbu s0, 25(a0) +; RV64I-NEXT: lbu s1, 26(a0) +; RV64I-NEXT: lbu s2, 27(a0) ; RV64I-NEXT: slli s5, s5, 8 ; RV64I-NEXT: slli s6, s6, 16 ; RV64I-NEXT: slli s7, s7, 24 -; RV64I-NEXT: or t1, s1, s0 -; RV64I-NEXT: or t2, s3, s2 +; RV64I-NEXT: slli s9, s9, 8 ; RV64I-NEXT: or t3, s5, s4 ; RV64I-NEXT: or t4, s7, s6 -; RV64I-NEXT: lbu s0, 24(a0) -; RV64I-NEXT: lbu s1, 25(a0) -; RV64I-NEXT: lbu s2, 26(a0) -; RV64I-NEXT: lbu s3, 27(a0) -; RV64I-NEXT: slli t6, t6, 8 -; RV64I-NEXT: slli s8, s8, 16 -; RV64I-NEXT: slli s9, s9, 24 -; RV64I-NEXT: slli s1, s1, 8 -; RV64I-NEXT: or t5, t6, t5 -; RV64I-NEXT: or t6, s9, s8 -; RV64I-NEXT: or s0, s1, s0 -; RV64I-NEXT: lbu s1, 28(a0) +; RV64I-NEXT: or t5, s9, s8 +; RV64I-NEXT: lbu s3, 28(a0) ; RV64I-NEXT: lbu s4, 29(a0) ; RV64I-NEXT: lbu s5, 30(a0) ; RV64I-NEXT: lbu s6, 31(a0) -; RV64I-NEXT: lbu a0, 0(a1) +; RV64I-NEXT: slli s10, s10, 16 +; RV64I-NEXT: slli s11, s11, 24 +; RV64I-NEXT: slli s0, s0, 8 +; RV64I-NEXT: slli s1, s1, 16 +; RV64I-NEXT: slli s2, s2, 24 +; RV64I-NEXT: slli s4, s4, 8 +; RV64I-NEXT: or a0, s11, s10 +; RV64I-NEXT: or t6, s0, t6 +; RV64I-NEXT: or s0, s2, s1 +; RV64I-NEXT: or s1, s4, s3 +; RV64I-NEXT: lbu s2, 0(a1) +; RV64I-NEXT: lbu s3, 1(a1) +; RV64I-NEXT: lbu s4, 2(a1) +; RV64I-NEXT: lbu s7, 3(a1) +; RV64I-NEXT: slli s5, s5, 16 +; RV64I-NEXT: slli s6, s6, 24 +; RV64I-NEXT: slli s3, s3, 8 +; RV64I-NEXT: slli s4, s4, 16 +; RV64I-NEXT: slli s7, s7, 24 +; RV64I-NEXT: or s5, s6, s5 +; RV64I-NEXT: or s2, s3, s2 +; RV64I-NEXT: or s3, s7, s4 +; RV64I-NEXT: lbu s4, 5(a1) +; RV64I-NEXT: lbu s6, 4(a1) +; RV64I-NEXT: lbu s7, 6(a1) +; RV64I-NEXT: lbu a1, 7(a1) +; RV64I-NEXT: slli s4, s4, 8 
+; RV64I-NEXT: or s4, s4, s6 +; RV64I-NEXT: slli s7, s7, 16 +; RV64I-NEXT: slli a1, a1, 24 +; RV64I-NEXT: or a1, a1, s7 ; RV64I-NEXT: sd zero, 32(sp) ; RV64I-NEXT: sd zero, 40(sp) ; RV64I-NEXT: sd zero, 48(sp) ; RV64I-NEXT: sd zero, 56(sp) -; RV64I-NEXT: slli s2, s2, 16 -; RV64I-NEXT: slli s3, s3, 24 -; RV64I-NEXT: or a1, s3, s2 -; RV64I-NEXT: mv s2, sp -; RV64I-NEXT: slli s4, s4, 8 -; RV64I-NEXT: slli s5, s5, 16 -; RV64I-NEXT: slli s6, s6, 24 -; RV64I-NEXT: or s1, s4, s1 -; RV64I-NEXT: srli s3, a0, 3 -; RV64I-NEXT: or s4, s6, s5 -; RV64I-NEXT: andi s5, a0, 63 -; RV64I-NEXT: andi s3, s3, 24 -; RV64I-NEXT: xori s5, s5, 63 -; RV64I-NEXT: or a3, a4, a3 -; RV64I-NEXT: or a4, a6, a5 -; RV64I-NEXT: or a5, t0, a7 -; RV64I-NEXT: or a6, t2, t1 -; RV64I-NEXT: or a7, t4, t3 -; RV64I-NEXT: or t0, t6, t5 -; RV64I-NEXT: or a1, a1, s0 -; RV64I-NEXT: or t1, s4, s1 -; RV64I-NEXT: add s2, s2, s3 -; RV64I-NEXT: slli a4, a4, 32 -; RV64I-NEXT: slli a6, a6, 32 -; RV64I-NEXT: slli t0, t0, 32 -; RV64I-NEXT: slli t1, t1, 32 +; RV64I-NEXT: or a5, a6, a5 +; RV64I-NEXT: mv a6, sp ; RV64I-NEXT: or a3, a4, a3 -; RV64I-NEXT: or a4, a6, a5 -; RV64I-NEXT: or a5, t0, a7 -; RV64I-NEXT: or a1, t1, a1 +; RV64I-NEXT: or a4, t0, a7 +; RV64I-NEXT: or a7, t2, t1 +; RV64I-NEXT: or t0, t4, t3 +; RV64I-NEXT: or a0, a0, t5 +; RV64I-NEXT: or t1, s0, t6 +; RV64I-NEXT: or t2, s5, s1 +; RV64I-NEXT: or t3, s3, s2 +; RV64I-NEXT: or a1, a1, s4 +; RV64I-NEXT: slli a3, a3, 32 +; RV64I-NEXT: slli a7, a7, 32 +; RV64I-NEXT: slli a0, a0, 32 +; RV64I-NEXT: slli t2, t2, 32 +; RV64I-NEXT: slli a1, a1, 32 +; RV64I-NEXT: or a3, a3, a5 +; RV64I-NEXT: or a4, a7, a4 +; RV64I-NEXT: or a0, a0, t0 +; RV64I-NEXT: or a5, t2, t1 +; RV64I-NEXT: or a1, a1, t3 ; RV64I-NEXT: sd a3, 0(sp) ; RV64I-NEXT: sd a4, 8(sp) -; RV64I-NEXT: sd a5, 16(sp) -; RV64I-NEXT: sd a1, 24(sp) -; RV64I-NEXT: ld a1, 8(s2) -; RV64I-NEXT: ld a3, 16(s2) -; RV64I-NEXT: ld a4, 0(s2) -; RV64I-NEXT: ld a5, 24(s2) -; RV64I-NEXT: srl a6, a1, a0 -; RV64I-NEXT: slli a7, a3, 1 -; RV64I-NEXT: srl a4, a4, a0 -; RV64I-NEXT: slli a1, a1, 1 -; RV64I-NEXT: srl a3, a3, a0 +; RV64I-NEXT: sd a0, 16(sp) +; RV64I-NEXT: sd a5, 24(sp) +; RV64I-NEXT: srli a0, a1, 3 +; RV64I-NEXT: andi a3, a1, 63 +; RV64I-NEXT: andi a0, a0, 24 +; RV64I-NEXT: xori a3, a3, 63 +; RV64I-NEXT: add a0, a6, a0 +; RV64I-NEXT: ld a4, 8(a0) +; RV64I-NEXT: ld a5, 16(a0) +; RV64I-NEXT: ld a6, 0(a0) +; RV64I-NEXT: ld a0, 24(a0) +; RV64I-NEXT: srl a7, a4, a1 ; RV64I-NEXT: slli t0, a5, 1 -; RV64I-NEXT: srl a5, a5, a0 -; RV64I-NEXT: sll a0, a7, s5 -; RV64I-NEXT: sll a1, a1, s5 -; RV64I-NEXT: sll a7, t0, s5 -; RV64I-NEXT: srli t0, a5, 56 -; RV64I-NEXT: srli t1, a5, 48 -; RV64I-NEXT: srli t2, a5, 40 -; RV64I-NEXT: srli t3, a5, 32 -; RV64I-NEXT: srli t4, a5, 24 -; RV64I-NEXT: srli t5, a5, 16 -; RV64I-NEXT: srli t6, a5, 8 -; RV64I-NEXT: or a0, a6, a0 -; RV64I-NEXT: or a1, a4, a1 -; RV64I-NEXT: or a3, a3, a7 +; RV64I-NEXT: srl a6, a6, a1 +; RV64I-NEXT: slli a4, a4, 1 +; RV64I-NEXT: srl a5, a5, a1 +; RV64I-NEXT: slli t1, a0, 1 +; RV64I-NEXT: srl t2, a0, a1 +; RV64I-NEXT: sll a0, t0, a3 +; RV64I-NEXT: sll a1, a4, a3 +; RV64I-NEXT: sll a3, t1, a3 +; RV64I-NEXT: srli a4, t2, 56 +; RV64I-NEXT: srli t0, t2, 48 +; RV64I-NEXT: srli t1, t2, 40 +; RV64I-NEXT: srli t3, t2, 32 +; RV64I-NEXT: srli t4, t2, 24 +; RV64I-NEXT: srli t5, t2, 16 +; RV64I-NEXT: srli t6, t2, 8 +; RV64I-NEXT: or a0, a7, a0 +; RV64I-NEXT: or a1, a6, a1 +; RV64I-NEXT: or a3, a5, a3 ; RV64I-NEXT: sb t3, 28(a2) -; RV64I-NEXT: sb t2, 29(a2) -; RV64I-NEXT: sb t1, 30(a2) -; RV64I-NEXT: sb t0, 
31(a2) -; RV64I-NEXT: sb a5, 24(a2) +; RV64I-NEXT: sb t1, 29(a2) +; RV64I-NEXT: sb t0, 30(a2) +; RV64I-NEXT: sb a4, 31(a2) +; RV64I-NEXT: sb t2, 24(a2) ; RV64I-NEXT: sb t6, 25(a2) ; RV64I-NEXT: sb t5, 26(a2) ; RV64I-NEXT: sb t4, 27(a2) @@ -1463,17 +1513,19 @@ define void @lshr_32bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind { ; RV64I-NEXT: sb a1, 9(a2) ; RV64I-NEXT: sb a5, 10(a2) ; RV64I-NEXT: sb a3, 11(a2) -; RV64I-NEXT: ld s0, 136(sp) # 8-byte Folded Reload -; RV64I-NEXT: ld s1, 128(sp) # 8-byte Folded Reload -; RV64I-NEXT: ld s2, 120(sp) # 8-byte Folded Reload -; RV64I-NEXT: ld s3, 112(sp) # 8-byte Folded Reload -; RV64I-NEXT: ld s4, 104(sp) # 8-byte Folded Reload -; RV64I-NEXT: ld s5, 96(sp) # 8-byte Folded Reload -; RV64I-NEXT: ld s6, 88(sp) # 8-byte Folded Reload -; RV64I-NEXT: ld s7, 80(sp) # 8-byte Folded Reload -; RV64I-NEXT: ld s8, 72(sp) # 8-byte Folded Reload -; RV64I-NEXT: ld s9, 64(sp) # 8-byte Folded Reload -; RV64I-NEXT: addi sp, sp, 144 +; RV64I-NEXT: ld s0, 152(sp) # 8-byte Folded Reload +; RV64I-NEXT: ld s1, 144(sp) # 8-byte Folded Reload +; RV64I-NEXT: ld s2, 136(sp) # 8-byte Folded Reload +; RV64I-NEXT: ld s3, 128(sp) # 8-byte Folded Reload +; RV64I-NEXT: ld s4, 120(sp) # 8-byte Folded Reload +; RV64I-NEXT: ld s5, 112(sp) # 8-byte Folded Reload +; RV64I-NEXT: ld s6, 104(sp) # 8-byte Folded Reload +; RV64I-NEXT: ld s7, 96(sp) # 8-byte Folded Reload +; RV64I-NEXT: ld s8, 88(sp) # 8-byte Folded Reload +; RV64I-NEXT: ld s9, 80(sp) # 8-byte Folded Reload +; RV64I-NEXT: ld s10, 72(sp) # 8-byte Folded Reload +; RV64I-NEXT: ld s11, 64(sp) # 8-byte Folded Reload +; RV64I-NEXT: addi sp, sp, 160 ; RV64I-NEXT: ret ; ; RV32I-LABEL: lshr_32bytes: @@ -1498,55 +1550,67 @@ define void @lshr_32bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind { ; RV32I-NEXT: lbu a7, 3(a0) ; RV32I-NEXT: lbu a5, 4(a0) ; RV32I-NEXT: lbu t0, 5(a0) -; RV32I-NEXT: lbu t3, 6(a0) -; RV32I-NEXT: lbu t6, 7(a0) -; RV32I-NEXT: lbu s2, 8(a0) -; RV32I-NEXT: lbu s3, 9(a0) -; RV32I-NEXT: lbu s4, 10(a0) -; RV32I-NEXT: lbu s5, 11(a0) -; RV32I-NEXT: lbu s7, 12(a0) -; RV32I-NEXT: lbu s8, 13(a0) -; RV32I-NEXT: lbu s9, 14(a0) -; RV32I-NEXT: lbu s10, 15(a0) -; RV32I-NEXT: lbu s11, 16(a0) -; RV32I-NEXT: lbu ra, 17(a0) -; RV32I-NEXT: lbu t4, 18(a0) -; RV32I-NEXT: lbu s0, 19(a0) +; RV32I-NEXT: lbu t1, 6(a0) +; RV32I-NEXT: lbu t2, 7(a0) +; RV32I-NEXT: lbu t3, 8(a0) +; RV32I-NEXT: lbu t4, 9(a0) +; RV32I-NEXT: lbu t5, 10(a0) +; RV32I-NEXT: lbu t6, 11(a0) +; RV32I-NEXT: lbu s0, 12(a0) +; RV32I-NEXT: lbu s2, 13(a0) +; RV32I-NEXT: lbu s4, 14(a0) +; RV32I-NEXT: lbu s5, 15(a0) +; RV32I-NEXT: lbu s6, 16(a0) +; RV32I-NEXT: lbu s7, 17(a0) +; RV32I-NEXT: lbu s8, 18(a0) +; RV32I-NEXT: lbu s9, 19(a0) ; RV32I-NEXT: slli a4, a4, 8 ; RV32I-NEXT: slli a6, a6, 16 ; RV32I-NEXT: slli a7, a7, 24 ; RV32I-NEXT: or a3, a4, a3 +; RV32I-NEXT: sw a3, 4(sp) # 4-byte Folded Spill ; RV32I-NEXT: or a4, a7, a6 -; RV32I-NEXT: lbu t1, 20(a0) -; RV32I-NEXT: lbu t2, 21(a0) -; RV32I-NEXT: lbu t5, 22(a0) -; RV32I-NEXT: lbu s1, 23(a0) +; RV32I-NEXT: lbu s10, 20(a0) +; RV32I-NEXT: lbu s11, 21(a0) +; RV32I-NEXT: lbu ra, 22(a0) +; RV32I-NEXT: lbu a3, 23(a0) ; RV32I-NEXT: slli t0, t0, 8 -; RV32I-NEXT: slli t3, t3, 16 +; RV32I-NEXT: slli t1, t1, 16 +; RV32I-NEXT: slli t2, t2, 24 +; RV32I-NEXT: slli t4, t4, 8 +; RV32I-NEXT: slli t5, t5, 16 ; RV32I-NEXT: slli t6, t6, 24 -; RV32I-NEXT: slli s3, s3, 8 +; RV32I-NEXT: or a5, t0, a5 +; RV32I-NEXT: or a6, t2, t1 +; RV32I-NEXT: or a7, t4, t3 +; RV32I-NEXT: or t0, t6, t5 +; RV32I-NEXT: lbu s1, 24(a0) +; RV32I-NEXT: 
lbu s3, 25(a0) +; RV32I-NEXT: lbu t4, 26(a0) +; RV32I-NEXT: lbu t5, 27(a0) +; RV32I-NEXT: slli s2, s2, 8 ; RV32I-NEXT: slli s4, s4, 16 ; RV32I-NEXT: slli s5, s5, 24 -; RV32I-NEXT: or a5, t0, a5 -; RV32I-NEXT: or a6, t6, t3 -; RV32I-NEXT: or a7, s3, s2 -; RV32I-NEXT: or t0, s5, s4 -; RV32I-NEXT: lbu t3, 24(a0) -; RV32I-NEXT: lbu s5, 25(a0) -; RV32I-NEXT: lbu s6, 26(a0) -; RV32I-NEXT: lbu t6, 27(a0) -; RV32I-NEXT: slli s8, s8, 8 -; RV32I-NEXT: slli s9, s9, 16 -; RV32I-NEXT: slli s10, s10, 24 -; RV32I-NEXT: slli ra, ra, 8 -; RV32I-NEXT: or s7, s8, s7 -; RV32I-NEXT: or s2, s10, s9 -; RV32I-NEXT: or s3, ra, s11 -; RV32I-NEXT: lbu s4, 28(a0) -; RV32I-NEXT: lbu s8, 29(a0) -; RV32I-NEXT: lbu s9, 30(a0) -; RV32I-NEXT: lbu s10, 31(a0) -; RV32I-NEXT: lbu a0, 0(a1) +; RV32I-NEXT: slli s7, s7, 8 +; RV32I-NEXT: or t1, s2, s0 +; RV32I-NEXT: or t2, s5, s4 +; RV32I-NEXT: or t3, s7, s6 +; RV32I-NEXT: lbu t6, 28(a0) +; RV32I-NEXT: lbu s4, 29(a0) +; RV32I-NEXT: lbu s5, 30(a0) +; RV32I-NEXT: lbu s6, 31(a0) +; RV32I-NEXT: slli s8, s8, 16 +; RV32I-NEXT: slli s9, s9, 24 +; RV32I-NEXT: slli s11, s11, 8 +; RV32I-NEXT: slli ra, ra, 16 +; RV32I-NEXT: slli a3, a3, 24 +; RV32I-NEXT: or a0, s9, s8 +; RV32I-NEXT: or s0, s11, s10 +; RV32I-NEXT: or s2, a3, ra +; RV32I-NEXT: lbu a3, 0(a1) +; RV32I-NEXT: lbu s7, 1(a1) +; RV32I-NEXT: lbu s8, 2(a1) +; RV32I-NEXT: lbu a1, 3(a1) ; RV32I-NEXT: sw zero, 56(sp) ; RV32I-NEXT: sw zero, 60(sp) ; RV32I-NEXT: sw zero, 64(sp) @@ -1555,90 +1619,89 @@ define void @lshr_32bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind { ; RV32I-NEXT: sw zero, 44(sp) ; RV32I-NEXT: sw zero, 48(sp) ; RV32I-NEXT: sw zero, 52(sp) +; RV32I-NEXT: slli s3, s3, 8 +; RV32I-NEXT: or s1, s3, s1 +; RV32I-NEXT: addi s3, sp, 8 ; RV32I-NEXT: slli t4, t4, 16 -; RV32I-NEXT: slli s0, s0, 24 -; RV32I-NEXT: or t4, s0, t4 -; RV32I-NEXT: addi s0, sp, 8 -; RV32I-NEXT: slli t2, t2, 8 -; RV32I-NEXT: slli t5, t5, 16 -; RV32I-NEXT: slli s1, s1, 24 -; RV32I-NEXT: slli s5, s5, 8 -; RV32I-NEXT: slli s6, s6, 16 -; RV32I-NEXT: slli t6, t6, 24 -; RV32I-NEXT: slli s8, s8, 8 -; RV32I-NEXT: slli s9, s9, 16 -; RV32I-NEXT: slli s10, s10, 24 -; RV32I-NEXT: or t1, t2, t1 +; RV32I-NEXT: slli t5, t5, 24 +; RV32I-NEXT: slli s4, s4, 8 +; RV32I-NEXT: slli s5, s5, 16 +; RV32I-NEXT: slli s6, s6, 24 +; RV32I-NEXT: slli s7, s7, 8 +; RV32I-NEXT: slli s8, s8, 16 +; RV32I-NEXT: slli a1, a1, 24 +; RV32I-NEXT: or t4, t5, t4 +; RV32I-NEXT: or t5, s4, t6 +; RV32I-NEXT: or t6, s6, s5 +; RV32I-NEXT: or a3, s7, a3 +; RV32I-NEXT: or a1, a1, s8 +; RV32I-NEXT: lw s4, 4(sp) # 4-byte Folded Reload +; RV32I-NEXT: or a4, a4, s4 +; RV32I-NEXT: or a5, a6, a5 +; RV32I-NEXT: or a6, t0, a7 +; RV32I-NEXT: or a7, t2, t1 +; RV32I-NEXT: or t0, a0, t3 +; RV32I-NEXT: or t1, s2, s0 +; RV32I-NEXT: or t2, t4, s1 +; RV32I-NEXT: or t3, t6, t5 +; RV32I-NEXT: or a0, a1, a3 +; RV32I-NEXT: sw t0, 24(sp) +; RV32I-NEXT: sw t1, 28(sp) +; RV32I-NEXT: sw t2, 32(sp) +; RV32I-NEXT: sw t3, 36(sp) +; RV32I-NEXT: sw a4, 8(sp) +; RV32I-NEXT: sw a5, 12(sp) +; RV32I-NEXT: sw a6, 16(sp) +; RV32I-NEXT: sw a7, 20(sp) ; RV32I-NEXT: srli a1, a0, 3 -; RV32I-NEXT: or t2, s1, t5 -; RV32I-NEXT: andi t5, a0, 31 -; RV32I-NEXT: or t3, s5, t3 -; RV32I-NEXT: or t6, t6, s6 -; RV32I-NEXT: or s1, s8, s4 -; RV32I-NEXT: or s4, s10, s9 -; RV32I-NEXT: andi s5, a1, 28 -; RV32I-NEXT: xori a1, t5, 31 -; RV32I-NEXT: or a3, a4, a3 -; RV32I-NEXT: or a4, a6, a5 -; RV32I-NEXT: or a5, t0, a7 -; RV32I-NEXT: or a6, s2, s7 -; RV32I-NEXT: or a7, t4, s3 -; RV32I-NEXT: or t0, t2, t1 -; RV32I-NEXT: or t1, t6, t3 -; RV32I-NEXT: or 
t2, s4, s1 -; RV32I-NEXT: add s0, s0, s5 -; RV32I-NEXT: sw a7, 24(sp) -; RV32I-NEXT: sw t0, 28(sp) -; RV32I-NEXT: sw t1, 32(sp) -; RV32I-NEXT: sw t2, 36(sp) -; RV32I-NEXT: sw a3, 8(sp) -; RV32I-NEXT: sw a4, 12(sp) -; RV32I-NEXT: sw a5, 16(sp) -; RV32I-NEXT: sw a6, 20(sp) -; RV32I-NEXT: lw a3, 0(s0) -; RV32I-NEXT: lw a4, 4(s0) -; RV32I-NEXT: lw a5, 8(s0) -; RV32I-NEXT: lw a6, 12(s0) -; RV32I-NEXT: lw a7, 16(s0) -; RV32I-NEXT: lw t0, 20(s0) -; RV32I-NEXT: lw t1, 24(s0) -; RV32I-NEXT: lw t2, 28(s0) -; RV32I-NEXT: srl t3, a4, a0 -; RV32I-NEXT: slli t4, a5, 1 +; RV32I-NEXT: andi a3, a0, 31 +; RV32I-NEXT: andi a4, a1, 28 +; RV32I-NEXT: xori a1, a3, 31 +; RV32I-NEXT: add a4, s3, a4 +; RV32I-NEXT: lw a3, 0(a4) +; RV32I-NEXT: lw a5, 4(a4) +; RV32I-NEXT: lw a6, 8(a4) +; RV32I-NEXT: lw a7, 12(a4) +; RV32I-NEXT: lw t0, 16(a4) +; RV32I-NEXT: lw t1, 20(a4) +; RV32I-NEXT: lw t2, 24(a4) +; RV32I-NEXT: lw a4, 28(a4) +; RV32I-NEXT: srl t3, a5, a0 +; RV32I-NEXT: slli t4, a6, 1 ; RV32I-NEXT: srl a3, a3, a0 -; RV32I-NEXT: slli a4, a4, 1 -; RV32I-NEXT: srl t5, a6, a0 -; RV32I-NEXT: slli t6, a7, 1 -; RV32I-NEXT: srl a5, a5, a0 -; RV32I-NEXT: slli a6, a6, 1 -; RV32I-NEXT: srl s0, t0, a0 -; RV32I-NEXT: slli s1, t1, 1 -; RV32I-NEXT: srl a7, a7, a0 -; RV32I-NEXT: slli t0, t0, 1 -; RV32I-NEXT: srl t1, t1, a0 -; RV32I-NEXT: slli s2, t2, 1 +; RV32I-NEXT: slli a5, a5, 1 +; RV32I-NEXT: srl t5, a7, a0 +; RV32I-NEXT: slli t6, t0, 1 +; RV32I-NEXT: srl a6, a6, a0 +; RV32I-NEXT: slli a7, a7, 1 +; RV32I-NEXT: srl s0, t1, a0 +; RV32I-NEXT: slli s1, t2, 1 +; RV32I-NEXT: srl t0, t0, a0 +; RV32I-NEXT: slli t1, t1, 1 ; RV32I-NEXT: srl t2, t2, a0 +; RV32I-NEXT: slli s2, a4, 1 +; RV32I-NEXT: srl s3, a4, a0 ; RV32I-NEXT: sll a0, t4, a1 -; RV32I-NEXT: sll a4, a4, a1 -; RV32I-NEXT: sll t4, t6, a1 -; RV32I-NEXT: sll a6, a6, a1 -; RV32I-NEXT: sll t6, s1, a1 -; RV32I-NEXT: sll t0, t0, a1 -; RV32I-NEXT: sll s1, s2, a1 -; RV32I-NEXT: srli s2, t2, 24 -; RV32I-NEXT: srli s3, t2, 16 -; RV32I-NEXT: srli s4, t2, 8 +; RV32I-NEXT: sll a4, a5, a1 +; RV32I-NEXT: sll a5, t6, a1 +; RV32I-NEXT: sll a7, a7, a1 +; RV32I-NEXT: sll t4, s1, a1 +; RV32I-NEXT: sll t1, t1, a1 +; RV32I-NEXT: sll t6, s2, a1 +; RV32I-NEXT: srli s1, s3, 24 +; RV32I-NEXT: srli s2, s3, 16 +; RV32I-NEXT: srli s4, s3, 8 ; RV32I-NEXT: or a0, t3, a0 ; RV32I-NEXT: or a1, a3, a4 -; RV32I-NEXT: or a3, t5, t4 -; RV32I-NEXT: or a4, a5, a6 -; RV32I-NEXT: or a5, s0, t6 -; RV32I-NEXT: or a6, a7, t0 -; RV32I-NEXT: or a7, t1, s1 -; RV32I-NEXT: sb t2, 28(a2) +; RV32I-NEXT: or a3, t5, a5 +; RV32I-NEXT: or a4, a6, a7 +; RV32I-NEXT: or a5, s0, t4 +; RV32I-NEXT: or a6, t0, t1 +; RV32I-NEXT: or a7, t2, t6 +; RV32I-NEXT: sb s3, 28(a2) ; RV32I-NEXT: sb s4, 29(a2) -; RV32I-NEXT: sb s3, 30(a2) -; RV32I-NEXT: sb s2, 31(a2) +; RV32I-NEXT: sb s2, 30(a2) +; RV32I-NEXT: sb s1, 31(a2) ; RV32I-NEXT: srli t0, a7, 24 ; RV32I-NEXT: srli t1, a7, 16 ; RV32I-NEXT: srli t2, a7, 8 @@ -1712,17 +1775,19 @@ define void @lshr_32bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind { define void @shl_32bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind { ; RV64I-LABEL: shl_32bytes: ; RV64I: # %bb.0: -; RV64I-NEXT: addi sp, sp, -144 -; RV64I-NEXT: sd s0, 136(sp) # 8-byte Folded Spill -; RV64I-NEXT: sd s1, 128(sp) # 8-byte Folded Spill -; RV64I-NEXT: sd s2, 120(sp) # 8-byte Folded Spill -; RV64I-NEXT: sd s3, 112(sp) # 8-byte Folded Spill -; RV64I-NEXT: sd s4, 104(sp) # 8-byte Folded Spill -; RV64I-NEXT: sd s5, 96(sp) # 8-byte Folded Spill -; RV64I-NEXT: sd s6, 88(sp) # 8-byte Folded Spill -; RV64I-NEXT: sd s7, 80(sp) # 
8-byte Folded Spill -; RV64I-NEXT: sd s8, 72(sp) # 8-byte Folded Spill -; RV64I-NEXT: sd s9, 64(sp) # 8-byte Folded Spill +; RV64I-NEXT: addi sp, sp, -160 +; RV64I-NEXT: sd s0, 152(sp) # 8-byte Folded Spill +; RV64I-NEXT: sd s1, 144(sp) # 8-byte Folded Spill +; RV64I-NEXT: sd s2, 136(sp) # 8-byte Folded Spill +; RV64I-NEXT: sd s3, 128(sp) # 8-byte Folded Spill +; RV64I-NEXT: sd s4, 120(sp) # 8-byte Folded Spill +; RV64I-NEXT: sd s5, 112(sp) # 8-byte Folded Spill +; RV64I-NEXT: sd s6, 104(sp) # 8-byte Folded Spill +; RV64I-NEXT: sd s7, 96(sp) # 8-byte Folded Spill +; RV64I-NEXT: sd s8, 88(sp) # 8-byte Folded Spill +; RV64I-NEXT: sd s9, 80(sp) # 8-byte Folded Spill +; RV64I-NEXT: sd s10, 72(sp) # 8-byte Folded Spill +; RV64I-NEXT: sd s11, 64(sp) # 8-byte Folded Spill ; RV64I-NEXT: lbu a3, 0(a0) ; RV64I-NEXT: lbu a4, 1(a0) ; RV64I-NEXT: lbu a5, 2(a0) @@ -1739,125 +1804,146 @@ define void @shl_32bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind { ; RV64I-NEXT: lbu s1, 13(a0) ; RV64I-NEXT: lbu s2, 14(a0) ; RV64I-NEXT: lbu s3, 15(a0) -; RV64I-NEXT: slli a4, a4, 8 -; RV64I-NEXT: slli a5, a5, 16 -; RV64I-NEXT: slli a6, a6, 24 -; RV64I-NEXT: or a3, a4, a3 -; RV64I-NEXT: or a4, a6, a5 ; RV64I-NEXT: lbu s4, 16(a0) ; RV64I-NEXT: lbu s5, 17(a0) ; RV64I-NEXT: lbu s6, 18(a0) ; RV64I-NEXT: lbu s7, 19(a0) +; RV64I-NEXT: slli a4, a4, 8 +; RV64I-NEXT: slli s8, a5, 16 +; RV64I-NEXT: slli a6, a6, 24 ; RV64I-NEXT: slli t0, t0, 8 ; RV64I-NEXT: slli t1, t1, 16 ; RV64I-NEXT: slli t2, t2, 24 +; RV64I-NEXT: or a5, a4, a3 +; RV64I-NEXT: or a6, a6, s8 +; RV64I-NEXT: or a3, t0, a7 +; RV64I-NEXT: or a4, t2, t1 +; RV64I-NEXT: lbu s8, 20(a0) +; RV64I-NEXT: lbu s9, 21(a0) +; RV64I-NEXT: lbu s10, 22(a0) +; RV64I-NEXT: lbu s11, 23(a0) ; RV64I-NEXT: slli t4, t4, 8 ; RV64I-NEXT: slli t5, t5, 16 ; RV64I-NEXT: slli t6, t6, 24 -; RV64I-NEXT: or a5, t0, a7 -; RV64I-NEXT: or a6, t2, t1 -; RV64I-NEXT: or a7, t4, t3 -; RV64I-NEXT: or t0, t6, t5 -; RV64I-NEXT: lbu t5, 20(a0) -; RV64I-NEXT: lbu t6, 21(a0) -; RV64I-NEXT: lbu s8, 22(a0) -; RV64I-NEXT: lbu s9, 23(a0) ; RV64I-NEXT: slli s1, s1, 8 ; RV64I-NEXT: slli s2, s2, 16 ; RV64I-NEXT: slli s3, s3, 24 +; RV64I-NEXT: or a7, t4, t3 +; RV64I-NEXT: or t0, t6, t5 +; RV64I-NEXT: or t1, s1, s0 +; RV64I-NEXT: or t2, s3, s2 +; RV64I-NEXT: lbu t6, 24(a0) +; RV64I-NEXT: lbu s0, 25(a0) +; RV64I-NEXT: lbu s1, 26(a0) +; RV64I-NEXT: lbu s2, 27(a0) ; RV64I-NEXT: slli s5, s5, 8 ; RV64I-NEXT: slli s6, s6, 16 ; RV64I-NEXT: slli s7, s7, 24 -; RV64I-NEXT: or t1, s1, s0 -; RV64I-NEXT: or t2, s3, s2 +; RV64I-NEXT: slli s9, s9, 8 ; RV64I-NEXT: or t3, s5, s4 ; RV64I-NEXT: or t4, s7, s6 -; RV64I-NEXT: lbu s0, 24(a0) -; RV64I-NEXT: lbu s1, 25(a0) -; RV64I-NEXT: lbu s2, 26(a0) -; RV64I-NEXT: lbu s3, 27(a0) -; RV64I-NEXT: slli t6, t6, 8 -; RV64I-NEXT: slli s8, s8, 16 -; RV64I-NEXT: slli s9, s9, 24 -; RV64I-NEXT: slli s1, s1, 8 -; RV64I-NEXT: or t5, t6, t5 -; RV64I-NEXT: or t6, s9, s8 -; RV64I-NEXT: or s0, s1, s0 -; RV64I-NEXT: lbu s1, 28(a0) +; RV64I-NEXT: or t5, s9, s8 +; RV64I-NEXT: lbu s3, 28(a0) ; RV64I-NEXT: lbu s4, 29(a0) ; RV64I-NEXT: lbu s5, 30(a0) ; RV64I-NEXT: lbu s6, 31(a0) -; RV64I-NEXT: lbu a0, 0(a1) +; RV64I-NEXT: slli s10, s10, 16 +; RV64I-NEXT: slli s11, s11, 24 +; RV64I-NEXT: slli s0, s0, 8 +; RV64I-NEXT: slli s1, s1, 16 +; RV64I-NEXT: slli s2, s2, 24 +; RV64I-NEXT: slli s4, s4, 8 +; RV64I-NEXT: or a0, s11, s10 +; RV64I-NEXT: or t6, s0, t6 +; RV64I-NEXT: or s0, s2, s1 +; RV64I-NEXT: or s1, s4, s3 +; RV64I-NEXT: lbu s2, 0(a1) +; RV64I-NEXT: lbu s3, 1(a1) +; RV64I-NEXT: lbu s4, 2(a1) +; 
RV64I-NEXT: lbu s7, 3(a1) +; RV64I-NEXT: slli s5, s5, 16 +; RV64I-NEXT: slli s6, s6, 24 +; RV64I-NEXT: slli s3, s3, 8 +; RV64I-NEXT: slli s4, s4, 16 +; RV64I-NEXT: slli s7, s7, 24 +; RV64I-NEXT: or s5, s6, s5 +; RV64I-NEXT: or s2, s3, s2 +; RV64I-NEXT: or s3, s7, s4 +; RV64I-NEXT: lbu s4, 5(a1) +; RV64I-NEXT: lbu s6, 4(a1) +; RV64I-NEXT: lbu s7, 6(a1) +; RV64I-NEXT: lbu a1, 7(a1) +; RV64I-NEXT: slli s4, s4, 8 +; RV64I-NEXT: or s4, s4, s6 +; RV64I-NEXT: slli s7, s7, 16 +; RV64I-NEXT: slli a1, a1, 24 +; RV64I-NEXT: or a1, a1, s7 ; RV64I-NEXT: sd zero, 0(sp) ; RV64I-NEXT: sd zero, 8(sp) ; RV64I-NEXT: sd zero, 16(sp) ; RV64I-NEXT: sd zero, 24(sp) -; RV64I-NEXT: slli s2, s2, 16 -; RV64I-NEXT: slli s3, s3, 24 -; RV64I-NEXT: or a1, s3, s2 -; RV64I-NEXT: addi s2, sp, 32 -; RV64I-NEXT: slli s4, s4, 8 -; RV64I-NEXT: slli s5, s5, 16 -; RV64I-NEXT: slli s6, s6, 24 -; RV64I-NEXT: or s1, s4, s1 -; RV64I-NEXT: srli s3, a0, 3 -; RV64I-NEXT: or s4, s6, s5 -; RV64I-NEXT: andi s5, a0, 63 -; RV64I-NEXT: andi s3, s3, 24 -; RV64I-NEXT: or a3, a4, a3 -; RV64I-NEXT: or a4, a6, a5 -; RV64I-NEXT: or a5, t0, a7 -; RV64I-NEXT: or a6, t2, t1 -; RV64I-NEXT: or a7, t4, t3 -; RV64I-NEXT: or t0, t6, t5 -; RV64I-NEXT: or a1, a1, s0 -; RV64I-NEXT: or t1, s4, s1 -; RV64I-NEXT: sub t2, s2, s3 -; RV64I-NEXT: slli a4, a4, 32 -; RV64I-NEXT: slli a6, a6, 32 -; RV64I-NEXT: slli t0, t0, 32 -; RV64I-NEXT: slli t1, t1, 32 +; RV64I-NEXT: or a5, a6, a5 +; RV64I-NEXT: addi a6, sp, 32 ; RV64I-NEXT: or a3, a4, a3 -; RV64I-NEXT: or a4, a6, a5 -; RV64I-NEXT: or a5, t0, a7 -; RV64I-NEXT: or a1, t1, a1 +; RV64I-NEXT: or a4, t0, a7 +; RV64I-NEXT: or a7, t2, t1 +; RV64I-NEXT: or t0, t4, t3 +; RV64I-NEXT: or a0, a0, t5 +; RV64I-NEXT: or t1, s0, t6 +; RV64I-NEXT: or t2, s5, s1 +; RV64I-NEXT: or t3, s3, s2 +; RV64I-NEXT: or a1, a1, s4 +; RV64I-NEXT: slli a3, a3, 32 +; RV64I-NEXT: slli a7, a7, 32 +; RV64I-NEXT: slli a0, a0, 32 +; RV64I-NEXT: slli t2, t2, 32 +; RV64I-NEXT: slli a1, a1, 32 +; RV64I-NEXT: or a3, a3, a5 +; RV64I-NEXT: or a4, a7, a4 +; RV64I-NEXT: or a0, a0, t0 +; RV64I-NEXT: or a5, t2, t1 +; RV64I-NEXT: or a1, a1, t3 ; RV64I-NEXT: sd a3, 32(sp) ; RV64I-NEXT: sd a4, 40(sp) -; RV64I-NEXT: sd a5, 48(sp) -; RV64I-NEXT: sd a1, 56(sp) -; RV64I-NEXT: ld a1, 0(t2) -; RV64I-NEXT: ld a3, 8(t2) -; RV64I-NEXT: ld a4, 16(t2) -; RV64I-NEXT: ld a5, 24(t2) -; RV64I-NEXT: xori a6, s5, 63 -; RV64I-NEXT: sll a7, a3, a0 -; RV64I-NEXT: srli t0, a1, 1 -; RV64I-NEXT: sll a5, a5, a0 -; RV64I-NEXT: srli t1, a4, 1 -; RV64I-NEXT: sll a4, a4, a0 -; RV64I-NEXT: srli a3, a3, 1 -; RV64I-NEXT: sll t2, a1, a0 -; RV64I-NEXT: srl a0, t0, a6 -; RV64I-NEXT: srl a1, t1, a6 -; RV64I-NEXT: srl a3, a3, a6 -; RV64I-NEXT: srli a6, t2, 56 -; RV64I-NEXT: srli t0, t2, 48 -; RV64I-NEXT: srli t1, t2, 40 -; RV64I-NEXT: srli t3, t2, 32 -; RV64I-NEXT: srli t4, t2, 24 -; RV64I-NEXT: srli t5, t2, 16 -; RV64I-NEXT: srli t6, t2, 8 -; RV64I-NEXT: or a0, a7, a0 -; RV64I-NEXT: or a1, a5, a1 -; RV64I-NEXT: or a3, a4, a3 -; RV64I-NEXT: sb t3, 4(a2) -; RV64I-NEXT: sb t1, 5(a2) -; RV64I-NEXT: sb t0, 6(a2) -; RV64I-NEXT: sb a6, 7(a2) -; RV64I-NEXT: sb t2, 0(a2) -; RV64I-NEXT: sb t6, 1(a2) -; RV64I-NEXT: sb t5, 2(a2) -; RV64I-NEXT: sb t4, 3(a2) +; RV64I-NEXT: sd a0, 48(sp) +; RV64I-NEXT: sd a5, 56(sp) +; RV64I-NEXT: srli a0, a1, 3 +; RV64I-NEXT: andi a3, a1, 63 +; RV64I-NEXT: andi a0, a0, 24 +; RV64I-NEXT: sub a0, a6, a0 +; RV64I-NEXT: ld a4, 0(a0) +; RV64I-NEXT: ld a5, 8(a0) +; RV64I-NEXT: ld a6, 16(a0) +; RV64I-NEXT: ld a0, 24(a0) +; RV64I-NEXT: xori a3, a3, 63 +; RV64I-NEXT: sll a7, a5, a1 +; 
RV64I-NEXT: srli t0, a4, 1 +; RV64I-NEXT: sll t1, a0, a1 +; RV64I-NEXT: srli a0, a6, 1 +; RV64I-NEXT: sll a6, a6, a1 +; RV64I-NEXT: srli a5, a5, 1 +; RV64I-NEXT: sll a4, a4, a1 +; RV64I-NEXT: srl a1, t0, a3 +; RV64I-NEXT: srl t0, a0, a3 +; RV64I-NEXT: srl a3, a5, a3 +; RV64I-NEXT: srli a5, a4, 56 +; RV64I-NEXT: srli t2, a4, 48 +; RV64I-NEXT: srli t3, a4, 40 +; RV64I-NEXT: srli t4, a4, 32 +; RV64I-NEXT: srli t5, a4, 24 +; RV64I-NEXT: srli t6, a4, 16 +; RV64I-NEXT: srli s0, a4, 8 +; RV64I-NEXT: or a0, a7, a1 +; RV64I-NEXT: or a1, t1, t0 +; RV64I-NEXT: or a3, a6, a3 +; RV64I-NEXT: sb t4, 4(a2) +; RV64I-NEXT: sb t3, 5(a2) +; RV64I-NEXT: sb t2, 6(a2) +; RV64I-NEXT: sb a5, 7(a2) +; RV64I-NEXT: sb a4, 0(a2) +; RV64I-NEXT: sb s0, 1(a2) +; RV64I-NEXT: sb t6, 2(a2) +; RV64I-NEXT: sb t5, 3(a2) ; RV64I-NEXT: srli a4, a3, 56 ; RV64I-NEXT: srli a5, a3, 48 ; RV64I-NEXT: srli a6, a3, 40 @@ -1903,17 +1989,19 @@ define void @shl_32bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind { ; RV64I-NEXT: sb a1, 9(a2) ; RV64I-NEXT: sb a5, 10(a2) ; RV64I-NEXT: sb a3, 11(a2) -; RV64I-NEXT: ld s0, 136(sp) # 8-byte Folded Reload -; RV64I-NEXT: ld s1, 128(sp) # 8-byte Folded Reload -; RV64I-NEXT: ld s2, 120(sp) # 8-byte Folded Reload -; RV64I-NEXT: ld s3, 112(sp) # 8-byte Folded Reload -; RV64I-NEXT: ld s4, 104(sp) # 8-byte Folded Reload -; RV64I-NEXT: ld s5, 96(sp) # 8-byte Folded Reload -; RV64I-NEXT: ld s6, 88(sp) # 8-byte Folded Reload -; RV64I-NEXT: ld s7, 80(sp) # 8-byte Folded Reload -; RV64I-NEXT: ld s8, 72(sp) # 8-byte Folded Reload -; RV64I-NEXT: ld s9, 64(sp) # 8-byte Folded Reload -; RV64I-NEXT: addi sp, sp, 144 +; RV64I-NEXT: ld s0, 152(sp) # 8-byte Folded Reload +; RV64I-NEXT: ld s1, 144(sp) # 8-byte Folded Reload +; RV64I-NEXT: ld s2, 136(sp) # 8-byte Folded Reload +; RV64I-NEXT: ld s3, 128(sp) # 8-byte Folded Reload +; RV64I-NEXT: ld s4, 120(sp) # 8-byte Folded Reload +; RV64I-NEXT: ld s5, 112(sp) # 8-byte Folded Reload +; RV64I-NEXT: ld s6, 104(sp) # 8-byte Folded Reload +; RV64I-NEXT: ld s7, 96(sp) # 8-byte Folded Reload +; RV64I-NEXT: ld s8, 88(sp) # 8-byte Folded Reload +; RV64I-NEXT: ld s9, 80(sp) # 8-byte Folded Reload +; RV64I-NEXT: ld s10, 72(sp) # 8-byte Folded Reload +; RV64I-NEXT: ld s11, 64(sp) # 8-byte Folded Reload +; RV64I-NEXT: addi sp, sp, 160 ; RV64I-NEXT: ret ; ; RV32I-LABEL: shl_32bytes: @@ -1938,55 +2026,67 @@ define void @shl_32bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind { ; RV32I-NEXT: lbu a7, 3(a0) ; RV32I-NEXT: lbu a5, 4(a0) ; RV32I-NEXT: lbu t0, 5(a0) -; RV32I-NEXT: lbu t3, 6(a0) -; RV32I-NEXT: lbu t6, 7(a0) -; RV32I-NEXT: lbu s2, 8(a0) -; RV32I-NEXT: lbu s3, 9(a0) -; RV32I-NEXT: lbu s4, 10(a0) -; RV32I-NEXT: lbu s5, 11(a0) -; RV32I-NEXT: lbu s7, 12(a0) -; RV32I-NEXT: lbu s8, 13(a0) -; RV32I-NEXT: lbu s9, 14(a0) -; RV32I-NEXT: lbu s10, 15(a0) -; RV32I-NEXT: lbu s11, 16(a0) -; RV32I-NEXT: lbu ra, 17(a0) -; RV32I-NEXT: lbu t4, 18(a0) -; RV32I-NEXT: lbu s0, 19(a0) +; RV32I-NEXT: lbu t1, 6(a0) +; RV32I-NEXT: lbu t2, 7(a0) +; RV32I-NEXT: lbu t3, 8(a0) +; RV32I-NEXT: lbu t4, 9(a0) +; RV32I-NEXT: lbu t5, 10(a0) +; RV32I-NEXT: lbu t6, 11(a0) +; RV32I-NEXT: lbu s0, 12(a0) +; RV32I-NEXT: lbu s2, 13(a0) +; RV32I-NEXT: lbu s4, 14(a0) +; RV32I-NEXT: lbu s5, 15(a0) +; RV32I-NEXT: lbu s6, 16(a0) +; RV32I-NEXT: lbu s7, 17(a0) +; RV32I-NEXT: lbu s8, 18(a0) +; RV32I-NEXT: lbu s9, 19(a0) ; RV32I-NEXT: slli a4, a4, 8 ; RV32I-NEXT: slli a6, a6, 16 ; RV32I-NEXT: slli a7, a7, 24 ; RV32I-NEXT: or a3, a4, a3 +; RV32I-NEXT: sw a3, 4(sp) # 4-byte Folded Spill ; RV32I-NEXT: or a4, a7, a6 -; 
RV32I-NEXT: lbu t1, 20(a0) -; RV32I-NEXT: lbu t2, 21(a0) -; RV32I-NEXT: lbu t5, 22(a0) -; RV32I-NEXT: lbu s1, 23(a0) +; RV32I-NEXT: lbu s10, 20(a0) +; RV32I-NEXT: lbu s11, 21(a0) +; RV32I-NEXT: lbu ra, 22(a0) +; RV32I-NEXT: lbu a3, 23(a0) ; RV32I-NEXT: slli t0, t0, 8 -; RV32I-NEXT: slli t3, t3, 16 +; RV32I-NEXT: slli t1, t1, 16 +; RV32I-NEXT: slli t2, t2, 24 +; RV32I-NEXT: slli t4, t4, 8 +; RV32I-NEXT: slli t5, t5, 16 ; RV32I-NEXT: slli t6, t6, 24 -; RV32I-NEXT: slli s3, s3, 8 +; RV32I-NEXT: or a5, t0, a5 +; RV32I-NEXT: or a6, t2, t1 +; RV32I-NEXT: or a7, t4, t3 +; RV32I-NEXT: or t0, t6, t5 +; RV32I-NEXT: lbu s1, 24(a0) +; RV32I-NEXT: lbu s3, 25(a0) +; RV32I-NEXT: lbu t4, 26(a0) +; RV32I-NEXT: lbu t5, 27(a0) +; RV32I-NEXT: slli s2, s2, 8 ; RV32I-NEXT: slli s4, s4, 16 ; RV32I-NEXT: slli s5, s5, 24 -; RV32I-NEXT: or a5, t0, a5 -; RV32I-NEXT: or a6, t6, t3 -; RV32I-NEXT: or a7, s3, s2 -; RV32I-NEXT: or t0, s5, s4 -; RV32I-NEXT: lbu t3, 24(a0) -; RV32I-NEXT: lbu s5, 25(a0) -; RV32I-NEXT: lbu s6, 26(a0) -; RV32I-NEXT: lbu t6, 27(a0) -; RV32I-NEXT: slli s8, s8, 8 -; RV32I-NEXT: slli s9, s9, 16 -; RV32I-NEXT: slli s10, s10, 24 -; RV32I-NEXT: slli ra, ra, 8 -; RV32I-NEXT: or s7, s8, s7 -; RV32I-NEXT: or s2, s10, s9 -; RV32I-NEXT: or s3, ra, s11 -; RV32I-NEXT: lbu s4, 28(a0) -; RV32I-NEXT: lbu s8, 29(a0) -; RV32I-NEXT: lbu s9, 30(a0) -; RV32I-NEXT: lbu s10, 31(a0) -; RV32I-NEXT: lbu a0, 0(a1) +; RV32I-NEXT: slli s7, s7, 8 +; RV32I-NEXT: or t1, s2, s0 +; RV32I-NEXT: or t2, s5, s4 +; RV32I-NEXT: or t3, s7, s6 +; RV32I-NEXT: lbu t6, 28(a0) +; RV32I-NEXT: lbu s4, 29(a0) +; RV32I-NEXT: lbu s5, 30(a0) +; RV32I-NEXT: lbu s6, 31(a0) +; RV32I-NEXT: slli s8, s8, 16 +; RV32I-NEXT: slli s9, s9, 24 +; RV32I-NEXT: slli s11, s11, 8 +; RV32I-NEXT: slli ra, ra, 16 +; RV32I-NEXT: slli a3, a3, 24 +; RV32I-NEXT: or a0, s9, s8 +; RV32I-NEXT: or s0, s11, s10 +; RV32I-NEXT: or s2, a3, ra +; RV32I-NEXT: lbu a3, 0(a1) +; RV32I-NEXT: lbu s7, 1(a1) +; RV32I-NEXT: lbu s8, 2(a1) +; RV32I-NEXT: lbu a1, 3(a1) ; RV32I-NEXT: sw zero, 24(sp) ; RV32I-NEXT: sw zero, 28(sp) ; RV32I-NEXT: sw zero, 32(sp) @@ -1995,89 +2095,88 @@ define void @shl_32bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind { ; RV32I-NEXT: sw zero, 12(sp) ; RV32I-NEXT: sw zero, 16(sp) ; RV32I-NEXT: sw zero, 20(sp) +; RV32I-NEXT: slli s3, s3, 8 +; RV32I-NEXT: or s1, s3, s1 +; RV32I-NEXT: addi s3, sp, 40 ; RV32I-NEXT: slli t4, t4, 16 -; RV32I-NEXT: slli s0, s0, 24 -; RV32I-NEXT: or t4, s0, t4 -; RV32I-NEXT: addi s0, sp, 40 -; RV32I-NEXT: slli t2, t2, 8 -; RV32I-NEXT: slli t5, t5, 16 -; RV32I-NEXT: slli s1, s1, 24 -; RV32I-NEXT: slli s5, s5, 8 -; RV32I-NEXT: slli s6, s6, 16 -; RV32I-NEXT: slli t6, t6, 24 -; RV32I-NEXT: slli s8, s8, 8 -; RV32I-NEXT: slli s9, s9, 16 -; RV32I-NEXT: slli s10, s10, 24 -; RV32I-NEXT: or t1, t2, t1 +; RV32I-NEXT: slli t5, t5, 24 +; RV32I-NEXT: slli s4, s4, 8 +; RV32I-NEXT: slli s5, s5, 16 +; RV32I-NEXT: slli s6, s6, 24 +; RV32I-NEXT: slli s7, s7, 8 +; RV32I-NEXT: slli s8, s8, 16 +; RV32I-NEXT: slli a1, a1, 24 +; RV32I-NEXT: or t4, t5, t4 +; RV32I-NEXT: or t5, s4, t6 +; RV32I-NEXT: or t6, s6, s5 +; RV32I-NEXT: or a3, s7, a3 +; RV32I-NEXT: or a1, a1, s8 +; RV32I-NEXT: lw s4, 4(sp) # 4-byte Folded Reload +; RV32I-NEXT: or a4, a4, s4 +; RV32I-NEXT: or a5, a6, a5 +; RV32I-NEXT: or a6, t0, a7 +; RV32I-NEXT: or a7, t2, t1 +; RV32I-NEXT: or t0, a0, t3 +; RV32I-NEXT: or t1, s2, s0 +; RV32I-NEXT: or t2, t4, s1 +; RV32I-NEXT: or t3, t6, t5 +; RV32I-NEXT: or a0, a1, a3 +; RV32I-NEXT: sw t0, 56(sp) +; RV32I-NEXT: sw t1, 60(sp) +; RV32I-NEXT: sw 
t2, 64(sp) +; RV32I-NEXT: sw t3, 68(sp) +; RV32I-NEXT: sw a4, 40(sp) +; RV32I-NEXT: sw a5, 44(sp) +; RV32I-NEXT: sw a6, 48(sp) +; RV32I-NEXT: sw a7, 52(sp) ; RV32I-NEXT: srli a1, a0, 3 -; RV32I-NEXT: or t2, s1, t5 -; RV32I-NEXT: andi t5, a0, 31 -; RV32I-NEXT: or t3, s5, t3 -; RV32I-NEXT: or t6, t6, s6 -; RV32I-NEXT: or s1, s8, s4 -; RV32I-NEXT: or s4, s10, s9 -; RV32I-NEXT: andi s5, a1, 28 -; RV32I-NEXT: xori a1, t5, 31 -; RV32I-NEXT: or a3, a4, a3 -; RV32I-NEXT: or a4, a6, a5 -; RV32I-NEXT: or a5, t0, a7 -; RV32I-NEXT: or a6, s2, s7 -; RV32I-NEXT: or a7, t4, s3 -; RV32I-NEXT: or t0, t2, t1 -; RV32I-NEXT: or t1, t6, t3 -; RV32I-NEXT: or t2, s4, s1 -; RV32I-NEXT: sub t3, s0, s5 -; RV32I-NEXT: sw a7, 56(sp) -; RV32I-NEXT: sw t0, 60(sp) -; RV32I-NEXT: sw t1, 64(sp) -; RV32I-NEXT: sw t2, 68(sp) -; RV32I-NEXT: sw a3, 40(sp) -; RV32I-NEXT: sw a4, 44(sp) -; RV32I-NEXT: sw a5, 48(sp) -; RV32I-NEXT: sw a6, 52(sp) -; RV32I-NEXT: lw a3, 0(t3) -; RV32I-NEXT: lw a4, 4(t3) -; RV32I-NEXT: lw a5, 8(t3) -; RV32I-NEXT: lw a6, 12(t3) -; RV32I-NEXT: lw a7, 16(t3) -; RV32I-NEXT: lw t0, 20(t3) -; RV32I-NEXT: lw t1, 24(t3) -; RV32I-NEXT: lw t2, 28(t3) -; RV32I-NEXT: sll t3, a4, a0 -; RV32I-NEXT: srli t4, a3, 1 -; RV32I-NEXT: sll t5, a6, a0 -; RV32I-NEXT: srli t6, a5, 1 -; RV32I-NEXT: sll a5, a5, a0 -; RV32I-NEXT: srli a4, a4, 1 -; RV32I-NEXT: sll s0, t0, a0 -; RV32I-NEXT: srli s1, a7, 1 -; RV32I-NEXT: sll a7, a7, a0 -; RV32I-NEXT: srli a6, a6, 1 +; RV32I-NEXT: andi a3, a0, 31 +; RV32I-NEXT: andi a4, a1, 28 +; RV32I-NEXT: xori a1, a3, 31 +; RV32I-NEXT: sub a3, s3, a4 +; RV32I-NEXT: lw a4, 0(a3) +; RV32I-NEXT: lw a5, 4(a3) +; RV32I-NEXT: lw a6, 8(a3) +; RV32I-NEXT: lw a7, 12(a3) +; RV32I-NEXT: lw t0, 16(a3) +; RV32I-NEXT: lw t1, 20(a3) +; RV32I-NEXT: lw t2, 24(a3) +; RV32I-NEXT: lw a3, 28(a3) +; RV32I-NEXT: sll t3, a5, a0 +; RV32I-NEXT: srli t4, a4, 1 +; RV32I-NEXT: sll t5, a7, a0 +; RV32I-NEXT: srli t6, a6, 1 +; RV32I-NEXT: sll a6, a6, a0 +; RV32I-NEXT: srli a5, a5, 1 +; RV32I-NEXT: sll s0, t1, a0 +; RV32I-NEXT: srli s1, t0, 1 +; RV32I-NEXT: sll t0, t0, a0 +; RV32I-NEXT: srli a7, a7, 1 +; RV32I-NEXT: sll s2, a3, a0 +; RV32I-NEXT: srli a3, t2, 1 ; RV32I-NEXT: sll t2, t2, a0 -; RV32I-NEXT: srli s2, t1, 1 -; RV32I-NEXT: sll t1, t1, a0 -; RV32I-NEXT: srli t0, t0, 1 -; RV32I-NEXT: sll s3, a3, a0 +; RV32I-NEXT: srli t1, t1, 1 +; RV32I-NEXT: sll s3, a4, a0 ; RV32I-NEXT: srl a0, t4, a1 -; RV32I-NEXT: srl a3, t6, a1 -; RV32I-NEXT: srl a4, a4, a1 +; RV32I-NEXT: srl a4, t6, a1 +; RV32I-NEXT: srl a5, a5, a1 ; RV32I-NEXT: srl t4, s1, a1 -; RV32I-NEXT: srl a6, a6, a1 -; RV32I-NEXT: srl t6, s2, a1 -; RV32I-NEXT: srl t0, t0, a1 +; RV32I-NEXT: srl a7, a7, a1 +; RV32I-NEXT: srl t6, a3, a1 +; RV32I-NEXT: srl t1, t1, a1 ; RV32I-NEXT: srli s1, s3, 24 -; RV32I-NEXT: srli s2, s3, 16 -; RV32I-NEXT: srli s4, s3, 8 +; RV32I-NEXT: srli s4, s3, 16 +; RV32I-NEXT: srli s5, s3, 8 ; RV32I-NEXT: or a0, t3, a0 -; RV32I-NEXT: or a1, t5, a3 -; RV32I-NEXT: or a3, a5, a4 +; RV32I-NEXT: or a1, t5, a4 +; RV32I-NEXT: or a3, a6, a5 ; RV32I-NEXT: or a4, s0, t4 -; RV32I-NEXT: or a5, a7, a6 -; RV32I-NEXT: or a6, t2, t6 -; RV32I-NEXT: or a7, t1, t0 +; RV32I-NEXT: or a5, t0, a7 +; RV32I-NEXT: or a6, s2, t6 +; RV32I-NEXT: or a7, t2, t1 ; RV32I-NEXT: sb s3, 0(a2) -; RV32I-NEXT: sb s4, 1(a2) -; RV32I-NEXT: sb s2, 2(a2) +; RV32I-NEXT: sb s5, 1(a2) +; RV32I-NEXT: sb s4, 2(a2) ; RV32I-NEXT: sb s1, 3(a2) ; RV32I-NEXT: srli t0, a7, 24 ; RV32I-NEXT: srli t1, a7, 16 @@ -2152,17 +2251,19 @@ define void @shl_32bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind { 
define void @ashr_32bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind { ; RV64I-LABEL: ashr_32bytes: ; RV64I: # %bb.0: -; RV64I-NEXT: addi sp, sp, -144 -; RV64I-NEXT: sd s0, 136(sp) # 8-byte Folded Spill -; RV64I-NEXT: sd s1, 128(sp) # 8-byte Folded Spill -; RV64I-NEXT: sd s2, 120(sp) # 8-byte Folded Spill -; RV64I-NEXT: sd s3, 112(sp) # 8-byte Folded Spill -; RV64I-NEXT: sd s4, 104(sp) # 8-byte Folded Spill -; RV64I-NEXT: sd s5, 96(sp) # 8-byte Folded Spill -; RV64I-NEXT: sd s6, 88(sp) # 8-byte Folded Spill -; RV64I-NEXT: sd s7, 80(sp) # 8-byte Folded Spill -; RV64I-NEXT: sd s8, 72(sp) # 8-byte Folded Spill -; RV64I-NEXT: sd s9, 64(sp) # 8-byte Folded Spill +; RV64I-NEXT: addi sp, sp, -160 +; RV64I-NEXT: sd s0, 152(sp) # 8-byte Folded Spill +; RV64I-NEXT: sd s1, 144(sp) # 8-byte Folded Spill +; RV64I-NEXT: sd s2, 136(sp) # 8-byte Folded Spill +; RV64I-NEXT: sd s3, 128(sp) # 8-byte Folded Spill +; RV64I-NEXT: sd s4, 120(sp) # 8-byte Folded Spill +; RV64I-NEXT: sd s5, 112(sp) # 8-byte Folded Spill +; RV64I-NEXT: sd s6, 104(sp) # 8-byte Folded Spill +; RV64I-NEXT: sd s7, 96(sp) # 8-byte Folded Spill +; RV64I-NEXT: sd s8, 88(sp) # 8-byte Folded Spill +; RV64I-NEXT: sd s9, 80(sp) # 8-byte Folded Spill +; RV64I-NEXT: sd s10, 72(sp) # 8-byte Folded Spill +; RV64I-NEXT: sd s11, 64(sp) # 8-byte Folded Spill ; RV64I-NEXT: lbu a3, 0(a0) ; RV64I-NEXT: lbu a4, 1(a0) ; RV64I-NEXT: lbu a5, 2(a0) @@ -2179,123 +2280,144 @@ define void @ashr_32bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind { ; RV64I-NEXT: lbu s1, 13(a0) ; RV64I-NEXT: lbu s2, 14(a0) ; RV64I-NEXT: lbu s3, 15(a0) -; RV64I-NEXT: slli a4, a4, 8 -; RV64I-NEXT: slli a5, a5, 16 -; RV64I-NEXT: slli a6, a6, 24 -; RV64I-NEXT: or a3, a4, a3 -; RV64I-NEXT: or a4, a6, a5 ; RV64I-NEXT: lbu s4, 16(a0) ; RV64I-NEXT: lbu s5, 17(a0) ; RV64I-NEXT: lbu s6, 18(a0) ; RV64I-NEXT: lbu s7, 19(a0) +; RV64I-NEXT: slli a4, a4, 8 +; RV64I-NEXT: slli a5, a5, 16 +; RV64I-NEXT: slli a6, a6, 24 ; RV64I-NEXT: slli t0, t0, 8 ; RV64I-NEXT: slli t1, t1, 16 ; RV64I-NEXT: slli t2, t2, 24 +; RV64I-NEXT: or a3, a4, a3 +; RV64I-NEXT: or a4, a6, a5 +; RV64I-NEXT: or a5, t0, a7 +; RV64I-NEXT: or a6, t2, t1 +; RV64I-NEXT: lbu s8, 20(a0) +; RV64I-NEXT: lbu s9, 21(a0) +; RV64I-NEXT: lbu s10, 22(a0) +; RV64I-NEXT: lbu s11, 23(a0) ; RV64I-NEXT: slli t4, t4, 8 ; RV64I-NEXT: slli t5, t5, 16 ; RV64I-NEXT: slli t6, t6, 24 -; RV64I-NEXT: or a5, t0, a7 -; RV64I-NEXT: or a6, t2, t1 -; RV64I-NEXT: or a7, t4, t3 -; RV64I-NEXT: or t0, t6, t5 -; RV64I-NEXT: lbu t5, 20(a0) -; RV64I-NEXT: lbu t6, 21(a0) -; RV64I-NEXT: lbu s8, 22(a0) -; RV64I-NEXT: lbu s9, 23(a0) ; RV64I-NEXT: slli s1, s1, 8 ; RV64I-NEXT: slli s2, s2, 16 ; RV64I-NEXT: slli s3, s3, 24 +; RV64I-NEXT: or a7, t4, t3 +; RV64I-NEXT: or t0, t6, t5 +; RV64I-NEXT: or t1, s1, s0 +; RV64I-NEXT: or t2, s3, s2 +; RV64I-NEXT: lbu t6, 24(a0) +; RV64I-NEXT: lbu s0, 25(a0) +; RV64I-NEXT: lbu s1, 26(a0) +; RV64I-NEXT: lbu s2, 27(a0) ; RV64I-NEXT: slli s5, s5, 8 ; RV64I-NEXT: slli s6, s6, 16 ; RV64I-NEXT: slli s7, s7, 24 -; RV64I-NEXT: or t1, s1, s0 -; RV64I-NEXT: or t2, s3, s2 +; RV64I-NEXT: slli s9, s9, 8 ; RV64I-NEXT: or t3, s5, s4 ; RV64I-NEXT: or t4, s7, s6 -; RV64I-NEXT: lbu s0, 24(a0) -; RV64I-NEXT: lbu s1, 25(a0) -; RV64I-NEXT: lbu s2, 26(a0) -; RV64I-NEXT: lbu s3, 27(a0) -; RV64I-NEXT: slli t6, t6, 8 -; RV64I-NEXT: slli s8, s8, 16 -; RV64I-NEXT: slli s9, s9, 24 -; RV64I-NEXT: slli s1, s1, 8 -; RV64I-NEXT: or t5, t6, t5 -; RV64I-NEXT: or t6, s9, s8 -; RV64I-NEXT: or s0, s1, s0 -; RV64I-NEXT: lbu s1, 28(a0) +; RV64I-NEXT: or t5, s9, 
s8 +; RV64I-NEXT: lbu s3, 28(a0) ; RV64I-NEXT: lbu s4, 29(a0) ; RV64I-NEXT: lbu s5, 30(a0) ; RV64I-NEXT: lbu s6, 31(a0) -; RV64I-NEXT: lbu a0, 0(a1) -; RV64I-NEXT: slli s2, s2, 16 -; RV64I-NEXT: slli s3, s3, 24 -; RV64I-NEXT: or a1, s3, s2 -; RV64I-NEXT: mv s2, sp +; RV64I-NEXT: slli s10, s10, 16 +; RV64I-NEXT: slli s11, s11, 24 +; RV64I-NEXT: slli s0, s0, 8 +; RV64I-NEXT: slli s1, s1, 16 +; RV64I-NEXT: slli s2, s2, 24 ; RV64I-NEXT: slli s4, s4, 8 +; RV64I-NEXT: or a0, s11, s10 +; RV64I-NEXT: or t6, s0, t6 +; RV64I-NEXT: or s0, s2, s1 +; RV64I-NEXT: or s1, s4, s3 +; RV64I-NEXT: lbu s2, 0(a1) +; RV64I-NEXT: lbu s3, 1(a1) +; RV64I-NEXT: lbu s4, 2(a1) +; RV64I-NEXT: lbu s7, 3(a1) ; RV64I-NEXT: slli s5, s5, 16 ; RV64I-NEXT: slli s6, s6, 24 -; RV64I-NEXT: or s1, s4, s1 -; RV64I-NEXT: srli s3, a0, 3 -; RV64I-NEXT: or s4, s6, s5 -; RV64I-NEXT: andi s5, a0, 63 -; RV64I-NEXT: andi s3, s3, 24 -; RV64I-NEXT: xori s5, s5, 63 +; RV64I-NEXT: slli s3, s3, 8 +; RV64I-NEXT: slli s4, s4, 16 +; RV64I-NEXT: slli s7, s7, 24 +; RV64I-NEXT: or s5, s6, s5 +; RV64I-NEXT: or s2, s3, s2 +; RV64I-NEXT: or s3, s7, s4 +; RV64I-NEXT: lbu s4, 5(a1) +; RV64I-NEXT: lbu s6, 4(a1) +; RV64I-NEXT: lbu s7, 6(a1) +; RV64I-NEXT: lbu a1, 7(a1) +; RV64I-NEXT: slli s4, s4, 8 +; RV64I-NEXT: or s4, s4, s6 +; RV64I-NEXT: slli s7, s7, 16 +; RV64I-NEXT: slli a1, a1, 24 +; RV64I-NEXT: or a1, a1, s7 +; RV64I-NEXT: mv s6, sp ; RV64I-NEXT: or a3, a4, a3 ; RV64I-NEXT: or a4, a6, a5 ; RV64I-NEXT: or a5, t0, a7 ; RV64I-NEXT: or a6, t2, t1 ; RV64I-NEXT: or a7, t4, t3 -; RV64I-NEXT: or t0, t6, t5 -; RV64I-NEXT: or a1, a1, s0 -; RV64I-NEXT: or t1, s4, s1 -; RV64I-NEXT: add s2, s2, s3 +; RV64I-NEXT: or a0, a0, t5 +; RV64I-NEXT: or t0, s0, t6 +; RV64I-NEXT: or t1, s5, s1 +; RV64I-NEXT: or t2, s3, s2 +; RV64I-NEXT: or a1, a1, s4 ; RV64I-NEXT: slli a4, a4, 32 ; RV64I-NEXT: slli a6, a6, 32 -; RV64I-NEXT: slli t0, t0, 32 -; RV64I-NEXT: slli t2, t1, 32 +; RV64I-NEXT: slli a0, a0, 32 +; RV64I-NEXT: slli t3, t1, 32 +; RV64I-NEXT: slli a1, a1, 32 ; RV64I-NEXT: sraiw t1, t1, 31 ; RV64I-NEXT: or a3, a4, a3 ; RV64I-NEXT: or a4, a6, a5 -; RV64I-NEXT: or a5, t0, a7 -; RV64I-NEXT: or a1, t2, a1 +; RV64I-NEXT: or a0, a0, a7 +; RV64I-NEXT: or a5, t3, t0 +; RV64I-NEXT: or a1, a1, t2 ; RV64I-NEXT: sd t1, 32(sp) ; RV64I-NEXT: sd t1, 40(sp) ; RV64I-NEXT: sd t1, 48(sp) ; RV64I-NEXT: sd t1, 56(sp) ; RV64I-NEXT: sd a3, 0(sp) ; RV64I-NEXT: sd a4, 8(sp) -; RV64I-NEXT: sd a5, 16(sp) -; RV64I-NEXT: sd a1, 24(sp) -; RV64I-NEXT: ld a1, 8(s2) -; RV64I-NEXT: ld a3, 16(s2) -; RV64I-NEXT: ld a4, 0(s2) -; RV64I-NEXT: ld a5, 24(s2) -; RV64I-NEXT: srl a6, a1, a0 -; RV64I-NEXT: slli a7, a3, 1 -; RV64I-NEXT: srl a4, a4, a0 -; RV64I-NEXT: slli a1, a1, 1 -; RV64I-NEXT: srl a3, a3, a0 +; RV64I-NEXT: sd a0, 16(sp) +; RV64I-NEXT: sd a5, 24(sp) +; RV64I-NEXT: srli a0, a1, 3 +; RV64I-NEXT: andi a3, a1, 63 +; RV64I-NEXT: andi a0, a0, 24 +; RV64I-NEXT: xori a3, a3, 63 +; RV64I-NEXT: add a0, s6, a0 +; RV64I-NEXT: ld a4, 8(a0) +; RV64I-NEXT: ld a5, 16(a0) +; RV64I-NEXT: ld a6, 0(a0) +; RV64I-NEXT: ld a0, 24(a0) +; RV64I-NEXT: srl a7, a4, a1 ; RV64I-NEXT: slli t0, a5, 1 -; RV64I-NEXT: sra a5, a5, a0 -; RV64I-NEXT: sll a0, a7, s5 -; RV64I-NEXT: sll a1, a1, s5 -; RV64I-NEXT: sll a7, t0, s5 -; RV64I-NEXT: srli t0, a5, 56 -; RV64I-NEXT: srli t1, a5, 48 -; RV64I-NEXT: srli t2, a5, 40 -; RV64I-NEXT: srli t3, a5, 32 -; RV64I-NEXT: srli t4, a5, 24 -; RV64I-NEXT: srli t5, a5, 16 -; RV64I-NEXT: srli t6, a5, 8 -; RV64I-NEXT: or a0, a6, a0 -; RV64I-NEXT: or a1, a4, a1 -; RV64I-NEXT: or a3, a3, a7 +; 
RV64I-NEXT: srl a6, a6, a1 +; RV64I-NEXT: slli a4, a4, 1 +; RV64I-NEXT: srl a5, a5, a1 +; RV64I-NEXT: slli t1, a0, 1 +; RV64I-NEXT: sra t2, a0, a1 +; RV64I-NEXT: sll a0, t0, a3 +; RV64I-NEXT: sll a1, a4, a3 +; RV64I-NEXT: sll a3, t1, a3 +; RV64I-NEXT: srli a4, t2, 56 +; RV64I-NEXT: srli t0, t2, 48 +; RV64I-NEXT: srli t1, t2, 40 +; RV64I-NEXT: srli t3, t2, 32 +; RV64I-NEXT: srli t4, t2, 24 +; RV64I-NEXT: srli t5, t2, 16 +; RV64I-NEXT: srli t6, t2, 8 +; RV64I-NEXT: or a0, a7, a0 +; RV64I-NEXT: or a1, a6, a1 +; RV64I-NEXT: or a3, a5, a3 ; RV64I-NEXT: sb t3, 28(a2) -; RV64I-NEXT: sb t2, 29(a2) -; RV64I-NEXT: sb t1, 30(a2) -; RV64I-NEXT: sb t0, 31(a2) -; RV64I-NEXT: sb a5, 24(a2) +; RV64I-NEXT: sb t1, 29(a2) +; RV64I-NEXT: sb t0, 30(a2) +; RV64I-NEXT: sb a4, 31(a2) +; RV64I-NEXT: sb t2, 24(a2) ; RV64I-NEXT: sb t6, 25(a2) ; RV64I-NEXT: sb t5, 26(a2) ; RV64I-NEXT: sb t4, 27(a2) @@ -2316,45 +2438,47 @@ define void @ashr_32bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind { ; RV64I-NEXT: srli s3, a0, 56 ; RV64I-NEXT: srli s4, a0, 48 ; RV64I-NEXT: srli s5, a0, 40 +; RV64I-NEXT: srli s6, a0, 32 ; RV64I-NEXT: sb a7, 20(a2) ; RV64I-NEXT: sb a6, 21(a2) ; RV64I-NEXT: sb a5, 22(a2) ; RV64I-NEXT: sb a4, 23(a2) -; RV64I-NEXT: srli a4, a0, 32 +; RV64I-NEXT: srli a4, a0, 24 ; RV64I-NEXT: sb a3, 16(a2) ; RV64I-NEXT: sb t2, 17(a2) ; RV64I-NEXT: sb t1, 18(a2) ; RV64I-NEXT: sb t0, 19(a2) -; RV64I-NEXT: srli a3, a0, 24 +; RV64I-NEXT: srli a3, a0, 16 ; RV64I-NEXT: sb t6, 4(a2) ; RV64I-NEXT: sb t5, 5(a2) ; RV64I-NEXT: sb t4, 6(a2) ; RV64I-NEXT: sb t3, 7(a2) -; RV64I-NEXT: srli a5, a0, 16 +; RV64I-NEXT: srli a5, a0, 8 ; RV64I-NEXT: sb a1, 0(a2) ; RV64I-NEXT: sb s2, 1(a2) ; RV64I-NEXT: sb s1, 2(a2) ; RV64I-NEXT: sb s0, 3(a2) -; RV64I-NEXT: srli a1, a0, 8 -; RV64I-NEXT: sb a4, 12(a2) +; RV64I-NEXT: sb s6, 12(a2) ; RV64I-NEXT: sb s5, 13(a2) ; RV64I-NEXT: sb s4, 14(a2) ; RV64I-NEXT: sb s3, 15(a2) ; RV64I-NEXT: sb a0, 8(a2) -; RV64I-NEXT: sb a1, 9(a2) -; RV64I-NEXT: sb a5, 10(a2) -; RV64I-NEXT: sb a3, 11(a2) -; RV64I-NEXT: ld s0, 136(sp) # 8-byte Folded Reload -; RV64I-NEXT: ld s1, 128(sp) # 8-byte Folded Reload -; RV64I-NEXT: ld s2, 120(sp) # 8-byte Folded Reload -; RV64I-NEXT: ld s3, 112(sp) # 8-byte Folded Reload -; RV64I-NEXT: ld s4, 104(sp) # 8-byte Folded Reload -; RV64I-NEXT: ld s5, 96(sp) # 8-byte Folded Reload -; RV64I-NEXT: ld s6, 88(sp) # 8-byte Folded Reload -; RV64I-NEXT: ld s7, 80(sp) # 8-byte Folded Reload -; RV64I-NEXT: ld s8, 72(sp) # 8-byte Folded Reload -; RV64I-NEXT: ld s9, 64(sp) # 8-byte Folded Reload -; RV64I-NEXT: addi sp, sp, 144 +; RV64I-NEXT: sb a5, 9(a2) +; RV64I-NEXT: sb a3, 10(a2) +; RV64I-NEXT: sb a4, 11(a2) +; RV64I-NEXT: ld s0, 152(sp) # 8-byte Folded Reload +; RV64I-NEXT: ld s1, 144(sp) # 8-byte Folded Reload +; RV64I-NEXT: ld s2, 136(sp) # 8-byte Folded Reload +; RV64I-NEXT: ld s3, 128(sp) # 8-byte Folded Reload +; RV64I-NEXT: ld s4, 120(sp) # 8-byte Folded Reload +; RV64I-NEXT: ld s5, 112(sp) # 8-byte Folded Reload +; RV64I-NEXT: ld s6, 104(sp) # 8-byte Folded Reload +; RV64I-NEXT: ld s7, 96(sp) # 8-byte Folded Reload +; RV64I-NEXT: ld s8, 88(sp) # 8-byte Folded Reload +; RV64I-NEXT: ld s9, 80(sp) # 8-byte Folded Reload +; RV64I-NEXT: ld s10, 72(sp) # 8-byte Folded Reload +; RV64I-NEXT: ld s11, 64(sp) # 8-byte Folded Reload +; RV64I-NEXT: addi sp, sp, 160 ; RV64I-NEXT: ret ; ; RV32I-LABEL: ashr_32bytes: @@ -2379,148 +2503,159 @@ define void @ashr_32bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind { ; RV32I-NEXT: lbu a7, 3(a0) ; RV32I-NEXT: lbu a5, 4(a0) ; RV32I-NEXT: lbu 
t0, 5(a0) -; RV32I-NEXT: lbu t3, 6(a0) -; RV32I-NEXT: lbu t4, 7(a0) -; RV32I-NEXT: lbu t6, 8(a0) -; RV32I-NEXT: lbu s0, 9(a0) -; RV32I-NEXT: lbu s4, 10(a0) -; RV32I-NEXT: lbu s5, 11(a0) -; RV32I-NEXT: lbu s6, 12(a0) -; RV32I-NEXT: lbu s7, 13(a0) -; RV32I-NEXT: lbu s8, 14(a0) -; RV32I-NEXT: lbu s9, 15(a0) -; RV32I-NEXT: lbu s10, 16(a0) -; RV32I-NEXT: lbu s11, 17(a0) -; RV32I-NEXT: lbu s2, 18(a0) -; RV32I-NEXT: lbu s3, 19(a0) +; RV32I-NEXT: lbu t1, 6(a0) +; RV32I-NEXT: lbu t2, 7(a0) +; RV32I-NEXT: lbu t3, 8(a0) +; RV32I-NEXT: lbu t4, 9(a0) +; RV32I-NEXT: lbu t5, 10(a0) +; RV32I-NEXT: lbu t6, 11(a0) +; RV32I-NEXT: lbu s0, 12(a0) +; RV32I-NEXT: lbu s1, 13(a0) +; RV32I-NEXT: lbu s2, 14(a0) +; RV32I-NEXT: lbu s3, 15(a0) +; RV32I-NEXT: lbu s4, 16(a0) +; RV32I-NEXT: lbu s5, 17(a0) +; RV32I-NEXT: lbu s6, 18(a0) +; RV32I-NEXT: lbu s7, 19(a0) ; RV32I-NEXT: slli a4, a4, 8 ; RV32I-NEXT: slli a6, a6, 16 ; RV32I-NEXT: slli a7, a7, 24 ; RV32I-NEXT: or a3, a4, a3 +; RV32I-NEXT: sw a3, 4(sp) # 4-byte Folded Spill ; RV32I-NEXT: or a4, a7, a6 -; RV32I-NEXT: lbu t1, 20(a0) -; RV32I-NEXT: lbu t2, 21(a0) -; RV32I-NEXT: lbu t5, 22(a0) -; RV32I-NEXT: lbu s1, 23(a0) +; RV32I-NEXT: lbu s8, 20(a0) +; RV32I-NEXT: lbu s9, 21(a0) +; RV32I-NEXT: lbu s10, 22(a0) +; RV32I-NEXT: lbu s11, 23(a0) ; RV32I-NEXT: slli t0, t0, 8 -; RV32I-NEXT: slli t3, t3, 16 -; RV32I-NEXT: slli t4, t4, 24 -; RV32I-NEXT: slli s0, s0, 8 -; RV32I-NEXT: slli s4, s4, 16 -; RV32I-NEXT: slli s5, s5, 24 +; RV32I-NEXT: slli t1, t1, 16 +; RV32I-NEXT: slli t2, t2, 24 +; RV32I-NEXT: slli t4, t4, 8 +; RV32I-NEXT: slli t5, t5, 16 +; RV32I-NEXT: slli t6, t6, 24 ; RV32I-NEXT: or a5, t0, a5 -; RV32I-NEXT: or a6, t4, t3 -; RV32I-NEXT: or a7, s0, t6 -; RV32I-NEXT: or t0, s5, s4 -; RV32I-NEXT: lbu t3, 24(a0) -; RV32I-NEXT: lbu s4, 25(a0) -; RV32I-NEXT: lbu s5, 26(a0) -; RV32I-NEXT: lbu ra, 27(a0) -; RV32I-NEXT: slli s7, s7, 8 -; RV32I-NEXT: slli s8, s8, 16 -; RV32I-NEXT: slli s9, s9, 24 -; RV32I-NEXT: slli s11, s11, 8 -; RV32I-NEXT: or t4, s7, s6 -; RV32I-NEXT: or t6, s9, s8 -; RV32I-NEXT: or s0, s11, s10 -; RV32I-NEXT: lbu s6, 28(a0) -; RV32I-NEXT: lbu s7, 29(a0) -; RV32I-NEXT: lbu s8, 30(a0) -; RV32I-NEXT: lbu s9, 31(a0) -; RV32I-NEXT: lbu a0, 0(a1) +; RV32I-NEXT: or a6, t2, t1 +; RV32I-NEXT: or a7, t4, t3 +; RV32I-NEXT: or t0, t6, t5 +; RV32I-NEXT: lbu ra, 24(a0) +; RV32I-NEXT: lbu a3, 25(a0) +; RV32I-NEXT: lbu t4, 26(a0) +; RV32I-NEXT: lbu t5, 27(a0) +; RV32I-NEXT: slli s1, s1, 8 ; RV32I-NEXT: slli s2, s2, 16 ; RV32I-NEXT: slli s3, s3, 24 -; RV32I-NEXT: or s2, s3, s2 -; RV32I-NEXT: addi s3, sp, 8 -; RV32I-NEXT: slli t2, t2, 8 -; RV32I-NEXT: slli t5, t5, 16 -; RV32I-NEXT: slli s1, s1, 24 -; RV32I-NEXT: slli s4, s4, 8 -; RV32I-NEXT: slli s5, s5, 16 -; RV32I-NEXT: slli ra, ra, 24 -; RV32I-NEXT: slli s7, s7, 8 -; RV32I-NEXT: slli s8, s8, 16 -; RV32I-NEXT: slli s9, s9, 24 -; RV32I-NEXT: or t1, t2, t1 -; RV32I-NEXT: srli a1, a0, 3 +; RV32I-NEXT: slli s5, s5, 8 +; RV32I-NEXT: or t1, s1, s0 +; RV32I-NEXT: or t2, s3, s2 +; RV32I-NEXT: or t3, s5, s4 +; RV32I-NEXT: lbu t6, 28(a0) +; RV32I-NEXT: lbu s0, 29(a0) +; RV32I-NEXT: lbu s1, 30(a0) +; RV32I-NEXT: lbu a0, 31(a0) +; RV32I-NEXT: slli s6, s6, 16 +; RV32I-NEXT: slli s7, s7, 24 +; RV32I-NEXT: slli s9, s9, 8 +; RV32I-NEXT: slli s10, s10, 16 +; RV32I-NEXT: slli s11, s11, 24 +; RV32I-NEXT: or s2, s7, s6 +; RV32I-NEXT: or s3, s9, s8 +; RV32I-NEXT: or s4, s11, s10 +; RV32I-NEXT: lbu s5, 0(a1) +; RV32I-NEXT: lbu s6, 1(a1) +; RV32I-NEXT: lbu s7, 2(a1) +; RV32I-NEXT: lbu a1, 3(a1) +; RV32I-NEXT: slli a3, a3, 8 +; RV32I-NEXT: or 
a3, a3, ra +; RV32I-NEXT: addi s8, sp, 8 +; RV32I-NEXT: slli t4, t4, 16 +; RV32I-NEXT: slli t5, t5, 24 +; RV32I-NEXT: slli s0, s0, 8 +; RV32I-NEXT: slli s1, s1, 16 +; RV32I-NEXT: slli a0, a0, 24 +; RV32I-NEXT: slli s6, s6, 8 +; RV32I-NEXT: slli s7, s7, 16 +; RV32I-NEXT: slli a1, a1, 24 +; RV32I-NEXT: or t4, t5, t4 +; RV32I-NEXT: or t5, s0, t6 +; RV32I-NEXT: or s1, a0, s1 +; RV32I-NEXT: or t6, s6, s5 +; RV32I-NEXT: or a1, a1, s7 +; RV32I-NEXT: srai s0, a0, 31 +; RV32I-NEXT: lw a0, 4(sp) # 4-byte Folded Reload +; RV32I-NEXT: or a4, a4, a0 +; RV32I-NEXT: or a5, a6, a5 +; RV32I-NEXT: or a6, t0, a7 +; RV32I-NEXT: or a7, t2, t1 +; RV32I-NEXT: or t0, s2, t3 +; RV32I-NEXT: or t1, s4, s3 +; RV32I-NEXT: or a3, t4, a3 ; RV32I-NEXT: or t2, s1, t5 -; RV32I-NEXT: andi t5, a0, 31 -; RV32I-NEXT: or t3, s4, t3 -; RV32I-NEXT: or s1, ra, s5 -; RV32I-NEXT: or s4, s7, s6 -; RV32I-NEXT: or s5, s9, s8 -; RV32I-NEXT: srai s6, s9, 31 -; RV32I-NEXT: andi s7, a1, 28 -; RV32I-NEXT: xori a1, t5, 31 -; RV32I-NEXT: or a3, a4, a3 -; RV32I-NEXT: or a4, a6, a5 -; RV32I-NEXT: or a5, t0, a7 -; RV32I-NEXT: or a6, t6, t4 -; RV32I-NEXT: or a7, s2, s0 -; RV32I-NEXT: or t0, t2, t1 -; RV32I-NEXT: or t1, s1, t3 -; RV32I-NEXT: or t2, s5, s4 -; RV32I-NEXT: sw s6, 56(sp) -; RV32I-NEXT: sw s6, 60(sp) -; RV32I-NEXT: sw s6, 64(sp) -; RV32I-NEXT: sw s6, 68(sp) -; RV32I-NEXT: sw s6, 40(sp) -; RV32I-NEXT: sw s6, 44(sp) -; RV32I-NEXT: sw s6, 48(sp) -; RV32I-NEXT: sw s6, 52(sp) -; RV32I-NEXT: add s3, s3, s7 -; RV32I-NEXT: sw a7, 24(sp) -; RV32I-NEXT: sw t0, 28(sp) -; RV32I-NEXT: sw t1, 32(sp) +; RV32I-NEXT: or a0, a1, t6 +; RV32I-NEXT: sw s0, 56(sp) +; RV32I-NEXT: sw s0, 60(sp) +; RV32I-NEXT: sw s0, 64(sp) +; RV32I-NEXT: sw s0, 68(sp) +; RV32I-NEXT: sw s0, 40(sp) +; RV32I-NEXT: sw s0, 44(sp) +; RV32I-NEXT: sw s0, 48(sp) +; RV32I-NEXT: sw s0, 52(sp) +; RV32I-NEXT: sw t0, 24(sp) +; RV32I-NEXT: sw t1, 28(sp) +; RV32I-NEXT: sw a3, 32(sp) ; RV32I-NEXT: sw t2, 36(sp) -; RV32I-NEXT: sw a3, 8(sp) -; RV32I-NEXT: sw a4, 12(sp) -; RV32I-NEXT: sw a5, 16(sp) -; RV32I-NEXT: sw a6, 20(sp) -; RV32I-NEXT: lw a3, 0(s3) -; RV32I-NEXT: lw a4, 4(s3) -; RV32I-NEXT: lw a5, 8(s3) -; RV32I-NEXT: lw a6, 12(s3) -; RV32I-NEXT: lw a7, 16(s3) -; RV32I-NEXT: lw t0, 20(s3) -; RV32I-NEXT: lw t1, 24(s3) -; RV32I-NEXT: lw t2, 28(s3) -; RV32I-NEXT: srl t3, a4, a0 -; RV32I-NEXT: slli t4, a5, 1 +; RV32I-NEXT: sw a4, 8(sp) +; RV32I-NEXT: sw a5, 12(sp) +; RV32I-NEXT: sw a6, 16(sp) +; RV32I-NEXT: sw a7, 20(sp) +; RV32I-NEXT: srli a1, a0, 3 +; RV32I-NEXT: andi a3, a0, 31 +; RV32I-NEXT: andi a4, a1, 28 +; RV32I-NEXT: xori a1, a3, 31 +; RV32I-NEXT: add a4, s8, a4 +; RV32I-NEXT: lw a3, 0(a4) +; RV32I-NEXT: lw a5, 4(a4) +; RV32I-NEXT: lw a6, 8(a4) +; RV32I-NEXT: lw a7, 12(a4) +; RV32I-NEXT: lw t0, 16(a4) +; RV32I-NEXT: lw t1, 20(a4) +; RV32I-NEXT: lw t2, 24(a4) +; RV32I-NEXT: lw a4, 28(a4) +; RV32I-NEXT: srl t3, a5, a0 +; RV32I-NEXT: slli t4, a6, 1 ; RV32I-NEXT: srl a3, a3, a0 -; RV32I-NEXT: slli a4, a4, 1 -; RV32I-NEXT: srl t5, a6, a0 -; RV32I-NEXT: slli t6, a7, 1 -; RV32I-NEXT: srl a5, a5, a0 -; RV32I-NEXT: slli a6, a6, 1 -; RV32I-NEXT: srl s0, t0, a0 -; RV32I-NEXT: slli s1, t1, 1 -; RV32I-NEXT: srl a7, a7, a0 -; RV32I-NEXT: slli t0, t0, 1 -; RV32I-NEXT: srl t1, t1, a0 -; RV32I-NEXT: slli s2, t2, 1 -; RV32I-NEXT: sra t2, t2, a0 +; RV32I-NEXT: slli a5, a5, 1 +; RV32I-NEXT: srl t5, a7, a0 +; RV32I-NEXT: slli t6, t0, 1 +; RV32I-NEXT: srl a6, a6, a0 +; RV32I-NEXT: slli a7, a7, 1 +; RV32I-NEXT: srl s0, t1, a0 +; RV32I-NEXT: slli s1, t2, 1 +; RV32I-NEXT: srl t0, t0, a0 +; RV32I-NEXT: slli 
t1, t1, 1 +; RV32I-NEXT: srl t2, t2, a0 +; RV32I-NEXT: slli s2, a4, 1 +; RV32I-NEXT: sra s3, a4, a0 ; RV32I-NEXT: sll a0, t4, a1 -; RV32I-NEXT: sll a4, a4, a1 -; RV32I-NEXT: sll t4, t6, a1 -; RV32I-NEXT: sll a6, a6, a1 -; RV32I-NEXT: sll t6, s1, a1 -; RV32I-NEXT: sll t0, t0, a1 -; RV32I-NEXT: sll s1, s2, a1 -; RV32I-NEXT: srli s2, t2, 24 -; RV32I-NEXT: srli s3, t2, 16 -; RV32I-NEXT: srli s4, t2, 8 +; RV32I-NEXT: sll a4, a5, a1 +; RV32I-NEXT: sll a5, t6, a1 +; RV32I-NEXT: sll a7, a7, a1 +; RV32I-NEXT: sll t4, s1, a1 +; RV32I-NEXT: sll t1, t1, a1 +; RV32I-NEXT: sll t6, s2, a1 +; RV32I-NEXT: srli s1, s3, 24 +; RV32I-NEXT: srli s2, s3, 16 +; RV32I-NEXT: srli s4, s3, 8 ; RV32I-NEXT: or a0, t3, a0 ; RV32I-NEXT: or a1, a3, a4 -; RV32I-NEXT: or a3, t5, t4 -; RV32I-NEXT: or a4, a5, a6 -; RV32I-NEXT: or a5, s0, t6 -; RV32I-NEXT: or a6, a7, t0 -; RV32I-NEXT: or a7, t1, s1 -; RV32I-NEXT: sb t2, 28(a2) +; RV32I-NEXT: or a3, t5, a5 +; RV32I-NEXT: or a4, a6, a7 +; RV32I-NEXT: or a5, s0, t4 +; RV32I-NEXT: or a6, t0, t1 +; RV32I-NEXT: or a7, t2, t6 +; RV32I-NEXT: sb s3, 28(a2) ; RV32I-NEXT: sb s4, 29(a2) -; RV32I-NEXT: sb s3, 30(a2) -; RV32I-NEXT: sb s2, 31(a2) +; RV32I-NEXT: sb s2, 30(a2) +; RV32I-NEXT: sb s1, 31(a2) ; RV32I-NEXT: srli t0, a7, 24 ; RV32I-NEXT: srli t1, a7, 16 ; RV32I-NEXT: srli t2, a7, 8 diff --git a/llvm/test/CodeGen/RISCV/xtheadfmemidx.ll b/llvm/test/CodeGen/RISCV/xtheadfmemidx.ll index cdaae23..5724c4f 100644 --- a/llvm/test/CodeGen/RISCV/xtheadfmemidx.ll +++ b/llvm/test/CodeGen/RISCV/xtheadfmemidx.ll @@ -1,33 +1,27 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+d -mattr=+xtheadfmemidx -mattr=+m -verify-machineinstrs < %s \ -; RUN: | FileCheck %s -check-prefix=RV32XTHEADMEMIDX -; RUN: llc -mtriple=riscv64 -mattr=+d -mattr=+xtheadfmemidx -verify-machineinstrs < %s \ -; RUN: | FileCheck %s -check-prefix=RV64XTHEADFMEMIDX +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+d,+xtheadfmemidx \ +; RUN: -verify-machineinstrs | FileCheck %s -check-prefixes=CHECK,RV32XTHEADFMEMIDX +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+d,+xtheadfmemidx \ +; RUN: -verify-machineinstrs | FileCheck %s -check-prefixes=CHECK,RV64XTHEADFMEMIDX -define float @flrw(ptr %a, i64 %b) { -; RV32XTHEADMEMIDX-LABEL: flrw: -; RV32XTHEADMEMIDX: # %bb.0: -; RV32XTHEADMEMIDX-NEXT: th.flrw fa5, a0, a1, 2 -; RV32XTHEADMEMIDX-NEXT: fadd.s fa0, fa5, fa5 -; RV32XTHEADMEMIDX-NEXT: ret -; -; RV64XTHEADFMEMIDX-LABEL: flrw: -; RV64XTHEADFMEMIDX: # %bb.0: -; RV64XTHEADFMEMIDX-NEXT: th.flrw fa5, a0, a1, 2 -; RV64XTHEADFMEMIDX-NEXT: fadd.s fa0, fa5, fa5 -; RV64XTHEADFMEMIDX-NEXT: ret - %1 = getelementptr float, ptr %a, i64 %b +define float @flrw(ptr %a, iXLen %b) { +; CHECK-LABEL: flrw: +; CHECK: # %bb.0: +; CHECK-NEXT: th.flrw fa5, a0, a1, 2 +; CHECK-NEXT: fadd.s fa0, fa5, fa5 +; CHECK-NEXT: ret + %1 = getelementptr float, ptr %a, iXLen %b %2 = load float, ptr %1, align 4 %3 = fadd float %2, %2 ret float %3 } define float @flurw(ptr %a, i32 %b) { -; RV32XTHEADMEMIDX-LABEL: flurw: -; RV32XTHEADMEMIDX: # %bb.0: -; RV32XTHEADMEMIDX-NEXT: th.flrw fa5, a0, a1, 2 -; RV32XTHEADMEMIDX-NEXT: fadd.s fa0, fa5, fa5 -; RV32XTHEADMEMIDX-NEXT: ret +; RV32XTHEADFMEMIDX-LABEL: flurw: +; RV32XTHEADFMEMIDX: # %bb.0: +; RV32XTHEADFMEMIDX-NEXT: th.flrw fa5, a0, a1, 2 +; RV32XTHEADFMEMIDX-NEXT: fadd.s fa0, fa5, fa5 +; RV32XTHEADFMEMIDX-NEXT: ret ; ; RV64XTHEADFMEMIDX-LABEL: flurw: ; RV64XTHEADFMEMIDX: # %bb.0: @@ -41,30 +35,24 @@ define float @flurw(ptr %a, 
i32 %b) { ret float %4 } -define void @fsrw(ptr %a, i64 %b, float %c) { -; RV32XTHEADMEMIDX-LABEL: fsrw: -; RV32XTHEADMEMIDX: # %bb.0: -; RV32XTHEADMEMIDX-NEXT: fadd.s fa5, fa0, fa0 -; RV32XTHEADMEMIDX-NEXT: th.fsrw fa5, a0, a1, 2 -; RV32XTHEADMEMIDX-NEXT: ret -; -; RV64XTHEADFMEMIDX-LABEL: fsrw: -; RV64XTHEADFMEMIDX: # %bb.0: -; RV64XTHEADFMEMIDX-NEXT: fadd.s fa5, fa0, fa0 -; RV64XTHEADFMEMIDX-NEXT: th.fsrw fa5, a0, a1, 2 -; RV64XTHEADFMEMIDX-NEXT: ret +define void @fsrw(ptr %a, iXLen %b, float %c) { +; CHECK-LABEL: fsrw: +; CHECK: # %bb.0: +; CHECK-NEXT: fadd.s fa5, fa0, fa0 +; CHECK-NEXT: th.fsrw fa5, a0, a1, 2 +; CHECK-NEXT: ret %1 = fadd float %c, %c - %2 = getelementptr float, ptr %a, i64 %b + %2 = getelementptr float, ptr %a, iXLen %b store float %1, ptr %2, align 4 ret void } define void @fsurw(ptr %a, i32 %b, float %c) { -; RV32XTHEADMEMIDX-LABEL: fsurw: -; RV32XTHEADMEMIDX: # %bb.0: -; RV32XTHEADMEMIDX-NEXT: fadd.s fa5, fa0, fa0 -; RV32XTHEADMEMIDX-NEXT: th.fsrw fa5, a0, a1, 2 -; RV32XTHEADMEMIDX-NEXT: ret +; RV32XTHEADFMEMIDX-LABEL: fsurw: +; RV32XTHEADFMEMIDX: # %bb.0: +; RV32XTHEADFMEMIDX-NEXT: fadd.s fa5, fa0, fa0 +; RV32XTHEADFMEMIDX-NEXT: th.fsrw fa5, a0, a1, 2 +; RV32XTHEADFMEMIDX-NEXT: ret ; ; RV64XTHEADFMEMIDX-LABEL: fsurw: ; RV64XTHEADFMEMIDX: # %bb.0: @@ -78,30 +66,24 @@ define void @fsurw(ptr %a, i32 %b, float %c) { ret void } -define double @flrd(ptr %a, i64 %b) { -; RV32XTHEADMEMIDX-LABEL: flrd: -; RV32XTHEADMEMIDX: # %bb.0: -; RV32XTHEADMEMIDX-NEXT: th.flrd fa5, a0, a1, 3 -; RV32XTHEADMEMIDX-NEXT: fadd.d fa0, fa5, fa5 -; RV32XTHEADMEMIDX-NEXT: ret -; -; RV64XTHEADFMEMIDX-LABEL: flrd: -; RV64XTHEADFMEMIDX: # %bb.0: -; RV64XTHEADFMEMIDX-NEXT: th.flrd fa5, a0, a1, 3 -; RV64XTHEADFMEMIDX-NEXT: fadd.d fa0, fa5, fa5 -; RV64XTHEADFMEMIDX-NEXT: ret - %1 = getelementptr double, ptr %a, i64 %b +define double @flrd(ptr %a, iXLen %b) { +; CHECK-LABEL: flrd: +; CHECK: # %bb.0: +; CHECK-NEXT: th.flrd fa5, a0, a1, 3 +; CHECK-NEXT: fadd.d fa0, fa5, fa5 +; CHECK-NEXT: ret + %1 = getelementptr double, ptr %a, iXLen %b %2 = load double, ptr %1, align 8 %3 = fadd double %2, %2 ret double %3 } define double @flurd(ptr %a, i32 %b) { -; RV32XTHEADMEMIDX-LABEL: flurd: -; RV32XTHEADMEMIDX: # %bb.0: -; RV32XTHEADMEMIDX-NEXT: th.flrd fa5, a0, a1, 3 -; RV32XTHEADMEMIDX-NEXT: fadd.d fa0, fa5, fa5 -; RV32XTHEADMEMIDX-NEXT: ret +; RV32XTHEADFMEMIDX-LABEL: flurd: +; RV32XTHEADFMEMIDX: # %bb.0: +; RV32XTHEADFMEMIDX-NEXT: th.flrd fa5, a0, a1, 3 +; RV32XTHEADFMEMIDX-NEXT: fadd.d fa0, fa5, fa5 +; RV32XTHEADFMEMIDX-NEXT: ret ; ; RV64XTHEADFMEMIDX-LABEL: flurd: ; RV64XTHEADFMEMIDX: # %bb.0: @@ -115,30 +97,24 @@ define double @flurd(ptr %a, i32 %b) { ret double %4 } -define void @fsrd(ptr %a, i64 %b, double %c) { -; RV32XTHEADMEMIDX-LABEL: fsrd: -; RV32XTHEADMEMIDX: # %bb.0: -; RV32XTHEADMEMIDX-NEXT: fadd.d fa5, fa0, fa0 -; RV32XTHEADMEMIDX-NEXT: th.fsrd fa5, a0, a1, 3 -; RV32XTHEADMEMIDX-NEXT: ret -; -; RV64XTHEADFMEMIDX-LABEL: fsrd: -; RV64XTHEADFMEMIDX: # %bb.0: -; RV64XTHEADFMEMIDX-NEXT: fadd.d fa5, fa0, fa0 -; RV64XTHEADFMEMIDX-NEXT: th.fsrd fa5, a0, a1, 3 -; RV64XTHEADFMEMIDX-NEXT: ret +define void @fsrd(ptr %a, iXLen %b, double %c) { +; CHECK-LABEL: fsrd: +; CHECK: # %bb.0: +; CHECK-NEXT: fadd.d fa5, fa0, fa0 +; CHECK-NEXT: th.fsrd fa5, a0, a1, 3 +; CHECK-NEXT: ret %1 = fadd double %c, %c - %2 = getelementptr double, ptr %a, i64 %b + %2 = getelementptr double, ptr %a, iXLen %b store double %1, ptr %2, align 8 ret void } define void @fsurd(ptr %a, i32 %b, double %c) { -; RV32XTHEADMEMIDX-LABEL: 
fsurd: -; RV32XTHEADMEMIDX: # %bb.0: -; RV32XTHEADMEMIDX-NEXT: fadd.d fa5, fa0, fa0 -; RV32XTHEADMEMIDX-NEXT: th.fsrd fa5, a0, a1, 3 -; RV32XTHEADMEMIDX-NEXT: ret +; RV32XTHEADFMEMIDX-LABEL: fsurd: +; RV32XTHEADFMEMIDX: # %bb.0: +; RV32XTHEADFMEMIDX-NEXT: fadd.d fa5, fa0, fa0 +; RV32XTHEADFMEMIDX-NEXT: th.fsrd fa5, a0, a1, 3 +; RV32XTHEADFMEMIDX-NEXT: ret ; ; RV64XTHEADFMEMIDX-LABEL: fsurd: ; RV64XTHEADFMEMIDX: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/xtheadmemidx.ll b/llvm/test/CodeGen/RISCV/xtheadmemidx.ll index fc20fcb..a20b08a 100644 --- a/llvm/test/CodeGen/RISCV/xtheadmemidx.ll +++ b/llvm/test/CodeGen/RISCV/xtheadmemidx.ll @@ -1,238 +1,156 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+d -mattr=+xtheadmemidx -mattr=+m -verify-machineinstrs < %s \ -; RUN: | FileCheck %s -check-prefix=RV32XTHEADMEMIDX -; RUN: llc -mtriple=riscv64 -mattr=+d -mattr=+xtheadmemidx -mattr=+m -verify-machineinstrs < %s \ -; RUN: | FileCheck %s -check-prefix=RV64XTHEADMEMIDX +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+d,+xtheadmemidx \ +; RUN: -verify-machineinstrs | FileCheck %s -check-prefixes=CHECK,RV32XTHEADMEMIDX +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+d,+xtheadmemidx \ +; RUN: -verify-machineinstrs | FileCheck %s -check-prefixes=CHECK,RV64XTHEADMEMIDX define ptr @lbia(ptr %base, ptr %addr.2, i8 %a) { -; RV32XTHEADMEMIDX-LABEL: lbia: -; RV32XTHEADMEMIDX: # %bb.0: -; RV32XTHEADMEMIDX-NEXT: th.lbia a3, (a0), -1, 0 -; RV32XTHEADMEMIDX-NEXT: add a2, a3, a2 -; RV32XTHEADMEMIDX-NEXT: sb a2, 0(a1) -; RV32XTHEADMEMIDX-NEXT: ret -; -; RV64XTHEADMEMIDX-LABEL: lbia: -; RV64XTHEADMEMIDX: # %bb.0: -; RV64XTHEADMEMIDX-NEXT: th.lbia a3, (a0), -1, 0 -; RV64XTHEADMEMIDX-NEXT: add a2, a3, a2 -; RV64XTHEADMEMIDX-NEXT: sb a2, 0(a1) -; RV64XTHEADMEMIDX-NEXT: ret - %addr = getelementptr i8, ptr %base, i8 0 +; CHECK-LABEL: lbia: +; CHECK: # %bb.0: +; CHECK-NEXT: th.lbia a3, (a0), -1, 0 +; CHECK-NEXT: add a2, a3, a2 +; CHECK-NEXT: sb a2, 0(a1) +; CHECK-NEXT: ret + %addr = getelementptr i8, ptr %base, iXLen 0 %ld = load i8, ptr %addr - %addr.1 = getelementptr i8, ptr %base, i8 -1 + %addr.1 = getelementptr i8, ptr %base, iXLen -1 %res = add i8 %ld, %a store i8 %res, ptr %addr.2 ret ptr %addr.1 } define ptr @lbib(ptr %base, i8 %a) { -; RV32XTHEADMEMIDX-LABEL: lbib: -; RV32XTHEADMEMIDX: # %bb.0: -; RV32XTHEADMEMIDX-NEXT: th.lbib a2, (a0), 1, 0 -; RV32XTHEADMEMIDX-NEXT: add a1, a2, a1 -; RV32XTHEADMEMIDX-NEXT: sb a1, 1(a0) -; RV32XTHEADMEMIDX-NEXT: ret -; -; RV64XTHEADMEMIDX-LABEL: lbib: -; RV64XTHEADMEMIDX: # %bb.0: -; RV64XTHEADMEMIDX-NEXT: th.lbib a2, (a0), 1, 0 -; RV64XTHEADMEMIDX-NEXT: add a1, a2, a1 -; RV64XTHEADMEMIDX-NEXT: sb a1, 1(a0) -; RV64XTHEADMEMIDX-NEXT: ret - %addr = getelementptr i8, ptr %base, i8 1 +; CHECK-LABEL: lbib: +; CHECK: # %bb.0: +; CHECK-NEXT: th.lbib a2, (a0), 1, 0 +; CHECK-NEXT: add a1, a2, a1 +; CHECK-NEXT: sb a1, 1(a0) +; CHECK-NEXT: ret + %addr = getelementptr i8, ptr %base, iXLen 1 %ld = load i8, ptr %addr - %addr.1 = getelementptr i8, ptr %base, i8 2 + %addr.1 = getelementptr i8, ptr %base, iXLen 2 %res = add i8 %ld, %a store i8 %res, ptr %addr.1 ret ptr %addr } -define ptr @lbuia(ptr %base, ptr %addr.2, i64 %a) { -; RV32XTHEADMEMIDX-LABEL: lbuia: -; RV32XTHEADMEMIDX: # %bb.0: -; RV32XTHEADMEMIDX-NEXT: th.lbuia a4, (a0), -1, 0 -; RV32XTHEADMEMIDX-NEXT: add a2, a4, a2 -; RV32XTHEADMEMIDX-NEXT: sltu a4, a2, a4 -; RV32XTHEADMEMIDX-NEXT: add a3, a3, a4 -; RV32XTHEADMEMIDX-NEXT: sw a2, 
0(a1) -; RV32XTHEADMEMIDX-NEXT: sw a3, 4(a1) -; RV32XTHEADMEMIDX-NEXT: ret -; -; RV64XTHEADMEMIDX-LABEL: lbuia: -; RV64XTHEADMEMIDX: # %bb.0: -; RV64XTHEADMEMIDX-NEXT: th.lbuia a3, (a0), -1, 0 -; RV64XTHEADMEMIDX-NEXT: add a2, a3, a2 -; RV64XTHEADMEMIDX-NEXT: sd a2, 0(a1) -; RV64XTHEADMEMIDX-NEXT: ret - %addr = getelementptr i8, ptr %base, i8 0 +define ptr @lbuia(ptr %base, ptr %addr.2, i32 %a) { +; CHECK-LABEL: lbuia: +; CHECK: # %bb.0: +; CHECK-NEXT: th.lbuia a3, (a0), -1, 0 +; CHECK-NEXT: add a2, a3, a2 +; CHECK-NEXT: sw a2, 0(a1) +; CHECK-NEXT: ret + %addr = getelementptr i8, ptr %base, iXLen 0 %ld = load i8, ptr %addr - %zext = zext i8 %ld to i64 - %addr.1 = getelementptr i8, ptr %base, i8 -1 - %res = add i64 %zext, %a - store i64 %res, ptr %addr.2 + %zext = zext i8 %ld to i32 + %addr.1 = getelementptr i8, ptr %base, iXLen -1 + %res = add i32 %zext, %a + store i32 %res, ptr %addr.2 ret ptr %addr.1 } -define ptr @lbuib(ptr %base, i64 %a, ptr %addr.1) { -; RV32XTHEADMEMIDX-LABEL: lbuib: -; RV32XTHEADMEMIDX: # %bb.0: -; RV32XTHEADMEMIDX-NEXT: th.lbuib a4, (a0), 1, 0 -; RV32XTHEADMEMIDX-NEXT: add a1, a4, a1 -; RV32XTHEADMEMIDX-NEXT: sltu a4, a1, a4 -; RV32XTHEADMEMIDX-NEXT: add a2, a2, a4 -; RV32XTHEADMEMIDX-NEXT: sw a1, 0(a3) -; RV32XTHEADMEMIDX-NEXT: sw a2, 4(a3) -; RV32XTHEADMEMIDX-NEXT: ret -; -; RV64XTHEADMEMIDX-LABEL: lbuib: -; RV64XTHEADMEMIDX: # %bb.0: -; RV64XTHEADMEMIDX-NEXT: th.lbuib a3, (a0), 1, 0 -; RV64XTHEADMEMIDX-NEXT: add a1, a3, a1 -; RV64XTHEADMEMIDX-NEXT: sd a1, 0(a2) -; RV64XTHEADMEMIDX-NEXT: ret - %addr = getelementptr i8, ptr %base, i8 1 +define ptr @lbuib(ptr %base, i32 %a, ptr %addr.1) { +; CHECK-LABEL: lbuib: +; CHECK: # %bb.0: +; CHECK-NEXT: th.lbuib a3, (a0), 1, 0 +; CHECK-NEXT: add a1, a3, a1 +; CHECK-NEXT: sw a1, 0(a2) +; CHECK-NEXT: ret + %addr = getelementptr i8, ptr %base, iXLen 1 %ld = load i8, ptr %addr - %zext = zext i8 %ld to i64 - %res = add i64 %zext, %a - store i64 %res, ptr %addr.1 + %zext = zext i8 %ld to i32 + %res = add i32 %zext, %a + store i32 %res, ptr %addr.1 ret ptr %addr } define ptr @lhia(ptr %base, ptr %addr.2, i16 %a) { -; RV32XTHEADMEMIDX-LABEL: lhia: -; RV32XTHEADMEMIDX: # %bb.0: -; RV32XTHEADMEMIDX-NEXT: th.lhia a3, (a0), -16, 1 -; RV32XTHEADMEMIDX-NEXT: add a2, a3, a2 -; RV32XTHEADMEMIDX-NEXT: sh a2, 0(a1) -; RV32XTHEADMEMIDX-NEXT: ret -; -; RV64XTHEADMEMIDX-LABEL: lhia: -; RV64XTHEADMEMIDX: # %bb.0: -; RV64XTHEADMEMIDX-NEXT: th.lhia a3, (a0), -16, 1 -; RV64XTHEADMEMIDX-NEXT: add a2, a3, a2 -; RV64XTHEADMEMIDX-NEXT: sh a2, 0(a1) -; RV64XTHEADMEMIDX-NEXT: ret - %addr = getelementptr i16, ptr %base, i16 0 +; CHECK-LABEL: lhia: +; CHECK: # %bb.0: +; CHECK-NEXT: th.lhia a3, (a0), -16, 1 +; CHECK-NEXT: add a2, a3, a2 +; CHECK-NEXT: sh a2, 0(a1) +; CHECK-NEXT: ret + %addr = getelementptr i16, ptr %base, iXLen 0 %ld = load i16, ptr %addr - %addr.1 = getelementptr i16, ptr %base, i16 -16 + %addr.1 = getelementptr i16, ptr %base, iXLen -16 %res = add i16 %ld, %a store i16 %res, ptr %addr.2 ret ptr %addr.1 } define ptr @lhib(ptr %base, i16 %a) { -; RV32XTHEADMEMIDX-LABEL: lhib: -; RV32XTHEADMEMIDX: # %bb.0: -; RV32XTHEADMEMIDX-NEXT: th.lhib a2, (a0), 2, 0 -; RV32XTHEADMEMIDX-NEXT: add a1, a2, a1 -; RV32XTHEADMEMIDX-NEXT: sh a1, 2(a0) -; RV32XTHEADMEMIDX-NEXT: ret -; -; RV64XTHEADMEMIDX-LABEL: lhib: -; RV64XTHEADMEMIDX: # %bb.0: -; RV64XTHEADMEMIDX-NEXT: th.lhib a2, (a0), 2, 0 -; RV64XTHEADMEMIDX-NEXT: add a1, a2, a1 -; RV64XTHEADMEMIDX-NEXT: sh a1, 2(a0) -; RV64XTHEADMEMIDX-NEXT: ret - %addr = getelementptr i16, ptr %base, i16 1 +; 
CHECK-LABEL: lhib: +; CHECK: # %bb.0: +; CHECK-NEXT: th.lhib a2, (a0), 2, 0 +; CHECK-NEXT: add a1, a2, a1 +; CHECK-NEXT: sh a1, 2(a0) +; CHECK-NEXT: ret + %addr = getelementptr i16, ptr %base, iXLen 1 %ld = load i16, ptr %addr - %addr.1 = getelementptr i16, ptr %base, i16 2 + %addr.1 = getelementptr i16, ptr %base, iXLen 2 %res = add i16 %ld, %a store i16 %res, ptr %addr.1 ret ptr %addr } -define ptr @lhuia(ptr %base, ptr %addr.2, i64 %a) { -; RV32XTHEADMEMIDX-LABEL: lhuia: -; RV32XTHEADMEMIDX: # %bb.0: -; RV32XTHEADMEMIDX-NEXT: th.lhuia a4, (a0), -16, 1 -; RV32XTHEADMEMIDX-NEXT: add a2, a4, a2 -; RV32XTHEADMEMIDX-NEXT: sltu a4, a2, a4 -; RV32XTHEADMEMIDX-NEXT: add a3, a3, a4 -; RV32XTHEADMEMIDX-NEXT: sw a2, 0(a1) -; RV32XTHEADMEMIDX-NEXT: sw a3, 4(a1) -; RV32XTHEADMEMIDX-NEXT: ret -; -; RV64XTHEADMEMIDX-LABEL: lhuia: -; RV64XTHEADMEMIDX: # %bb.0: -; RV64XTHEADMEMIDX-NEXT: th.lhuia a3, (a0), -16, 1 -; RV64XTHEADMEMIDX-NEXT: add a2, a3, a2 -; RV64XTHEADMEMIDX-NEXT: sd a2, 0(a1) -; RV64XTHEADMEMIDX-NEXT: ret - %addr = getelementptr i16, ptr %base, i16 0 +define ptr @lhuia(ptr %base, ptr %addr.2, i32 %a) { +; CHECK-LABEL: lhuia: +; CHECK: # %bb.0: +; CHECK-NEXT: th.lhuia a3, (a0), -16, 1 +; CHECK-NEXT: add a2, a3, a2 +; CHECK-NEXT: sw a2, 0(a1) +; CHECK-NEXT: ret + %addr = getelementptr i16, ptr %base, iXLen 0 %ld = load i16, ptr %addr - %zext = zext i16 %ld to i64 - %addr.1 = getelementptr i16, ptr %base, i16 -16 - %res = add i64 %zext, %a - store i64 %res, ptr %addr.2 + %zext = zext i16 %ld to i32 + %addr.1 = getelementptr i16, ptr %base, iXLen -16 + %res = add i32 %zext, %a + store i32 %res, ptr %addr.2 ret ptr %addr.1 } -define ptr @lhuib(ptr %base, i64 %a, ptr %addr.1) { -; RV32XTHEADMEMIDX-LABEL: lhuib: -; RV32XTHEADMEMIDX: # %bb.0: -; RV32XTHEADMEMIDX-NEXT: th.lhuib a4, (a0), 2, 0 -; RV32XTHEADMEMIDX-NEXT: add a1, a4, a1 -; RV32XTHEADMEMIDX-NEXT: sltu a4, a1, a4 -; RV32XTHEADMEMIDX-NEXT: add a2, a2, a4 -; RV32XTHEADMEMIDX-NEXT: sw a1, 0(a3) -; RV32XTHEADMEMIDX-NEXT: sw a2, 4(a3) -; RV32XTHEADMEMIDX-NEXT: ret -; -; RV64XTHEADMEMIDX-LABEL: lhuib: -; RV64XTHEADMEMIDX: # %bb.0: -; RV64XTHEADMEMIDX-NEXT: th.lhuib a3, (a0), 2, 0 -; RV64XTHEADMEMIDX-NEXT: add a1, a3, a1 -; RV64XTHEADMEMIDX-NEXT: sd a1, 0(a2) -; RV64XTHEADMEMIDX-NEXT: ret - %addr = getelementptr i16, ptr %base, i16 1 +define ptr @lhuib(ptr %base, i32 %a, ptr %addr.1) { +; CHECK-LABEL: lhuib: +; CHECK: # %bb.0: +; CHECK-NEXT: th.lhuib a3, (a0), 2, 0 +; CHECK-NEXT: add a1, a3, a1 +; CHECK-NEXT: sw a1, 0(a2) +; CHECK-NEXT: ret + %addr = getelementptr i16, ptr %base, iXLen 1 %ld = load i16, ptr %addr - %zext = zext i16 %ld to i64 - %res = add i64 %zext, %a - store i64 %res, ptr %addr.1 + %zext = zext i16 %ld to i32 + %res = add i32 %zext, %a + store i32 %res, ptr %addr.1 ret ptr %addr } define ptr @lwia(ptr %base, ptr %addr.2, i32 %a) { -; RV32XTHEADMEMIDX-LABEL: lwia: -; RV32XTHEADMEMIDX: # %bb.0: -; RV32XTHEADMEMIDX-NEXT: th.lwia a3, (a0), -16, 2 -; RV32XTHEADMEMIDX-NEXT: add a2, a3, a2 -; RV32XTHEADMEMIDX-NEXT: sw a2, 0(a1) -; RV32XTHEADMEMIDX-NEXT: ret -; -; RV64XTHEADMEMIDX-LABEL: lwia: -; RV64XTHEADMEMIDX: # %bb.0: -; RV64XTHEADMEMIDX-NEXT: th.lwia a3, (a0), -16, 2 -; RV64XTHEADMEMIDX-NEXT: add a2, a3, a2 -; RV64XTHEADMEMIDX-NEXT: sw a2, 0(a1) -; RV64XTHEADMEMIDX-NEXT: ret - %addr = getelementptr i32, ptr %base, i32 0 +; CHECK-LABEL: lwia: +; CHECK: # %bb.0: +; CHECK-NEXT: th.lwia a3, (a0), -16, 2 +; CHECK-NEXT: add a2, a3, a2 +; CHECK-NEXT: sw a2, 0(a1) +; CHECK-NEXT: ret + %addr = getelementptr i32, ptr %base, iXLen 0 %ld 
= load i32, ptr %addr - %addr.1 = getelementptr i32, ptr %base, i32 -16 + %addr.1 = getelementptr i32, ptr %base, iXLen -16 %res = add i32 %ld, %a store i32 %res, ptr %addr.2 ret ptr %addr.1 } define ptr @lwib(ptr %base, i32 %a) { -; RV32XTHEADMEMIDX-LABEL: lwib: -; RV32XTHEADMEMIDX: # %bb.0: -; RV32XTHEADMEMIDX-NEXT: th.lwib a2, (a0), 4, 0 -; RV32XTHEADMEMIDX-NEXT: add a1, a2, a1 -; RV32XTHEADMEMIDX-NEXT: sw a1, 4(a0) -; RV32XTHEADMEMIDX-NEXT: ret -; -; RV64XTHEADMEMIDX-LABEL: lwib: -; RV64XTHEADMEMIDX: # %bb.0: -; RV64XTHEADMEMIDX-NEXT: th.lwib a2, (a0), 4, 0 -; RV64XTHEADMEMIDX-NEXT: add a1, a2, a1 -; RV64XTHEADMEMIDX-NEXT: sw a1, 4(a0) -; RV64XTHEADMEMIDX-NEXT: ret - %addr = getelementptr i32, ptr %base, i32 1 +; CHECK-LABEL: lwib: +; CHECK: # %bb.0: +; CHECK-NEXT: th.lwib a2, (a0), 4, 0 +; CHECK-NEXT: add a1, a2, a1 +; CHECK-NEXT: sw a1, 4(a0) +; CHECK-NEXT: ret + %addr = getelementptr i32, ptr %base, iXLen 1 %ld = load i32, ptr %addr - %addr.1 = getelementptr i32, ptr %base, i32 2 + %addr.1 = getelementptr i32, ptr %base, iXLen 2 %res = add i32 %ld, %a store i32 %res, ptr %addr.1 ret ptr %addr @@ -255,10 +173,10 @@ define ptr @lwuia(ptr %base, ptr %addr.2, i64 %a) { ; RV64XTHEADMEMIDX-NEXT: add a2, a3, a2 ; RV64XTHEADMEMIDX-NEXT: sd a2, 0(a1) ; RV64XTHEADMEMIDX-NEXT: ret - %addr = getelementptr i32, ptr %base, i32 0 + %addr = getelementptr i32, ptr %base, iXLen 0 %ld = load i32, ptr %addr %zext = zext i32 %ld to i64 - %addr.1 = getelementptr i32, ptr %base, i32 -16 + %addr.1 = getelementptr i32, ptr %base, iXLen -16 %res = add i64 %zext, %a store i64 %res, ptr %addr.2 ret ptr %addr.1 @@ -281,7 +199,7 @@ define ptr @lwuib(ptr %base, i64 %a, ptr %addr.1) { ; RV64XTHEADMEMIDX-NEXT: add a1, a3, a1 ; RV64XTHEADMEMIDX-NEXT: sd a1, 0(a2) ; RV64XTHEADMEMIDX-NEXT: ret - %addr = getelementptr i32, ptr %base, i32 1 + %addr = getelementptr i32, ptr %base, iXLen 1 %ld = load i32, ptr %addr %zext = zext i32 %ld to i64 %res = add i64 %zext, %a @@ -309,9 +227,9 @@ define ptr @ldia(ptr %base, ptr %addr.2, i64 %a) { ; RV64XTHEADMEMIDX-NEXT: add a2, a3, a2 ; RV64XTHEADMEMIDX-NEXT: sd a2, 0(a1) ; RV64XTHEADMEMIDX-NEXT: ret - %addr = getelementptr i64, ptr %base, i64 0 + %addr = getelementptr i64, ptr %base, iXLen 0 %ld = load i64, ptr %addr - %addr.1 = getelementptr i64, ptr %base, i64 -16 + %addr.1 = getelementptr i64, ptr %base, iXLen -16 %res = add i64 %ld, %a store i64 %res, ptr %addr.2 ret ptr %addr.1 @@ -336,117 +254,81 @@ define ptr @ldib(ptr %base, i64 %a) { ; RV64XTHEADMEMIDX-NEXT: add a1, a2, a1 ; RV64XTHEADMEMIDX-NEXT: sd a1, 8(a0) ; RV64XTHEADMEMIDX-NEXT: ret - %addr = getelementptr i64, ptr %base, i64 1 + %addr = getelementptr i64, ptr %base, iXLen 1 %ld = load i64, ptr %addr - %addr.1 = getelementptr i64, ptr %base, i64 2 + %addr.1 = getelementptr i64, ptr %base, iXLen 2 %res = add i64 %ld, %a store i64 %res, ptr %addr.1 ret ptr %addr } define ptr @sbia(ptr %base, i8 %a, i8 %b) { -; RV32XTHEADMEMIDX-LABEL: sbia: -; RV32XTHEADMEMIDX: # %bb.0: -; RV32XTHEADMEMIDX-NEXT: add a1, a1, a2 -; RV32XTHEADMEMIDX-NEXT: th.sbia a1, (a0), 1, 0 -; RV32XTHEADMEMIDX-NEXT: ret -; -; RV64XTHEADMEMIDX-LABEL: sbia: -; RV64XTHEADMEMIDX: # %bb.0: -; RV64XTHEADMEMIDX-NEXT: add a1, a1, a2 -; RV64XTHEADMEMIDX-NEXT: th.sbia a1, (a0), 1, 0 -; RV64XTHEADMEMIDX-NEXT: ret - %addr.1 = getelementptr i8, ptr %base, i8 1 +; CHECK-LABEL: sbia: +; CHECK: # %bb.0: +; CHECK-NEXT: add a1, a1, a2 +; CHECK-NEXT: th.sbia a1, (a0), 1, 0 +; CHECK-NEXT: ret + %addr.1 = getelementptr i8, ptr %base, iXLen 1 %res = add i8 %a, %b store i8 
%res, ptr %base ret ptr %addr.1 } define ptr @sbib(ptr %base, i8 %a, i8 %b) { -; RV32XTHEADMEMIDX-LABEL: sbib: -; RV32XTHEADMEMIDX: # %bb.0: -; RV32XTHEADMEMIDX-NEXT: add a1, a1, a2 -; RV32XTHEADMEMIDX-NEXT: th.sbib a1, (a0), 1, 0 -; RV32XTHEADMEMIDX-NEXT: ret -; -; RV64XTHEADMEMIDX-LABEL: sbib: -; RV64XTHEADMEMIDX: # %bb.0: -; RV64XTHEADMEMIDX-NEXT: add a1, a1, a2 -; RV64XTHEADMEMIDX-NEXT: th.sbib a1, (a0), 1, 0 -; RV64XTHEADMEMIDX-NEXT: ret - %addr.1 = getelementptr i8, ptr %base, i8 1 +; CHECK-LABEL: sbib: +; CHECK: # %bb.0: +; CHECK-NEXT: add a1, a1, a2 +; CHECK-NEXT: th.sbib a1, (a0), 1, 0 +; CHECK-NEXT: ret + %addr.1 = getelementptr i8, ptr %base, iXLen 1 %res = add i8 %a, %b store i8 %res, ptr %addr.1 ret ptr %addr.1 } define ptr @shia(ptr %base, i16 %a, i16 %b) { -; RV32XTHEADMEMIDX-LABEL: shia: -; RV32XTHEADMEMIDX: # %bb.0: -; RV32XTHEADMEMIDX-NEXT: add a1, a1, a2 -; RV32XTHEADMEMIDX-NEXT: th.shia a1, (a0), -9, 1 -; RV32XTHEADMEMIDX-NEXT: ret -; -; RV64XTHEADMEMIDX-LABEL: shia: -; RV64XTHEADMEMIDX: # %bb.0: -; RV64XTHEADMEMIDX-NEXT: add a1, a1, a2 -; RV64XTHEADMEMIDX-NEXT: th.shia a1, (a0), -9, 1 -; RV64XTHEADMEMIDX-NEXT: ret - %addr.1 = getelementptr i16, ptr %base, i16 -9 +; CHECK-LABEL: shia: +; CHECK: # %bb.0: +; CHECK-NEXT: add a1, a1, a2 +; CHECK-NEXT: th.shia a1, (a0), -9, 1 +; CHECK-NEXT: ret + %addr.1 = getelementptr i16, ptr %base, iXLen -9 %res = add i16 %a, %b store i16 %res, ptr %base ret ptr %addr.1 } define ptr @shib(ptr %base, i16 %a, i16 %b) { -; RV32XTHEADMEMIDX-LABEL: shib: -; RV32XTHEADMEMIDX: # %bb.0: -; RV32XTHEADMEMIDX-NEXT: add a1, a1, a2 -; RV32XTHEADMEMIDX-NEXT: th.shib a1, (a0), 2, 0 -; RV32XTHEADMEMIDX-NEXT: ret -; -; RV64XTHEADMEMIDX-LABEL: shib: -; RV64XTHEADMEMIDX: # %bb.0: -; RV64XTHEADMEMIDX-NEXT: add a1, a1, a2 -; RV64XTHEADMEMIDX-NEXT: th.shib a1, (a0), 2, 0 -; RV64XTHEADMEMIDX-NEXT: ret - %addr.1 = getelementptr i16, ptr %base, i16 1 +; CHECK-LABEL: shib: +; CHECK: # %bb.0: +; CHECK-NEXT: add a1, a1, a2 +; CHECK-NEXT: th.shib a1, (a0), 2, 0 +; CHECK-NEXT: ret + %addr.1 = getelementptr i16, ptr %base, iXLen 1 %res = add i16 %a, %b store i16 %res, ptr %addr.1 ret ptr %addr.1 } define ptr @swia(ptr %base, i32 %a, i32 %b) { -; RV32XTHEADMEMIDX-LABEL: swia: -; RV32XTHEADMEMIDX: # %bb.0: -; RV32XTHEADMEMIDX-NEXT: add a1, a1, a2 -; RV32XTHEADMEMIDX-NEXT: th.swia a1, (a0), 8, 2 -; RV32XTHEADMEMIDX-NEXT: ret -; -; RV64XTHEADMEMIDX-LABEL: swia: -; RV64XTHEADMEMIDX: # %bb.0: -; RV64XTHEADMEMIDX-NEXT: add a1, a1, a2 -; RV64XTHEADMEMIDX-NEXT: th.swia a1, (a0), 8, 2 -; RV64XTHEADMEMIDX-NEXT: ret - %addr.1 = getelementptr i32, ptr %base, i32 8 +; CHECK-LABEL: swia: +; CHECK: # %bb.0: +; CHECK-NEXT: add a1, a1, a2 +; CHECK-NEXT: th.swia a1, (a0), 8, 2 +; CHECK-NEXT: ret + %addr.1 = getelementptr i32, ptr %base, iXLen 8 %res = add i32 %a, %b store i32 %res, ptr %base ret ptr %addr.1 } define ptr @swib(ptr %base, i32 %a, i32 %b) { -; RV32XTHEADMEMIDX-LABEL: swib: -; RV32XTHEADMEMIDX: # %bb.0: -; RV32XTHEADMEMIDX-NEXT: add a1, a1, a2 -; RV32XTHEADMEMIDX-NEXT: th.swib a1, (a0), -13, 3 -; RV32XTHEADMEMIDX-NEXT: ret -; -; RV64XTHEADMEMIDX-LABEL: swib: -; RV64XTHEADMEMIDX: # %bb.0: -; RV64XTHEADMEMIDX-NEXT: add a1, a1, a2 -; RV64XTHEADMEMIDX-NEXT: th.swib a1, (a0), -13, 3 -; RV64XTHEADMEMIDX-NEXT: ret - %addr.1 = getelementptr i32, ptr %base, i32 -26 +; CHECK-LABEL: swib: +; CHECK: # %bb.0: +; CHECK-NEXT: add a1, a1, a2 +; CHECK-NEXT: th.swib a1, (a0), -13, 3 +; CHECK-NEXT: ret + %addr.1 = getelementptr i32, ptr %base, iXLen -26 %res = add i32 %a, %b store i32 %res, 
ptr %addr.1 ret ptr %addr.1 @@ -470,7 +352,7 @@ define ptr @sdia(ptr %base, i64 %a, i64 %b) { ; RV64XTHEADMEMIDX-NEXT: add a1, a1, a2 ; RV64XTHEADMEMIDX-NEXT: th.sdia a1, (a0), 8, 3 ; RV64XTHEADMEMIDX-NEXT: ret - %addr.1 = getelementptr i64, ptr %base, i64 8 + %addr.1 = getelementptr i64, ptr %base, iXLen 8 %res = add i64 %a, %b store i64 %res, ptr %base ret ptr %addr.1 @@ -492,48 +374,33 @@ define ptr @sdib(ptr %base, i64 %a, i64 %b) { ; RV64XTHEADMEMIDX-NEXT: add a1, a1, a2 ; RV64XTHEADMEMIDX-NEXT: th.sdib a1, (a0), 8, 0 ; RV64XTHEADMEMIDX-NEXT: ret - %addr.1 = getelementptr i64, ptr %base, i64 1 + %addr.1 = getelementptr i64, ptr %base, iXLen 1 %res = add i64 %a, %b store i64 %res, ptr %addr.1 ret ptr %addr.1 } -define i8 @lrb_anyext(ptr %a, i64 %b) { -; RV32XTHEADMEMIDX-LABEL: lrb_anyext: -; RV32XTHEADMEMIDX: # %bb.0: -; RV32XTHEADMEMIDX-NEXT: th.lrb a0, a0, a1, 0 -; RV32XTHEADMEMIDX-NEXT: ret -; -; RV64XTHEADMEMIDX-LABEL: lrb_anyext: -; RV64XTHEADMEMIDX: # %bb.0: -; RV64XTHEADMEMIDX-NEXT: th.lrb a0, a0, a1, 0 -; RV64XTHEADMEMIDX-NEXT: ret - %1 = getelementptr i8, ptr %a, i64 %b +define i8 @lrb_anyext(ptr %a, iXLen %b) { +; CHECK-LABEL: lrb_anyext: +; CHECK: # %bb.0: +; CHECK-NEXT: th.lrb a0, a0, a1, 0 +; CHECK-NEXT: ret + %1 = getelementptr i8, ptr %a, iXLen %b %2 = load i8, ptr %1, align 1 ret i8 %2 } -define i64 @lrb(ptr %a, i64 %b) { -; RV32XTHEADMEMIDX-LABEL: lrb: -; RV32XTHEADMEMIDX: # %bb.0: -; RV32XTHEADMEMIDX-NEXT: th.lrb a1, a0, a1, 0 -; RV32XTHEADMEMIDX-NEXT: srai a2, a1, 31 -; RV32XTHEADMEMIDX-NEXT: add a0, a1, a1 -; RV32XTHEADMEMIDX-NEXT: sltu a1, a0, a1 -; RV32XTHEADMEMIDX-NEXT: add a2, a2, a2 -; RV32XTHEADMEMIDX-NEXT: add a1, a2, a1 -; RV32XTHEADMEMIDX-NEXT: ret -; -; RV64XTHEADMEMIDX-LABEL: lrb: -; RV64XTHEADMEMIDX: # %bb.0: -; RV64XTHEADMEMIDX-NEXT: th.lrb a0, a0, a1, 0 -; RV64XTHEADMEMIDX-NEXT: add a0, a0, a0 -; RV64XTHEADMEMIDX-NEXT: ret - %1 = getelementptr i8, ptr %a, i64 %b +define i32 @lrb(ptr %a, iXLen %b) { +; CHECK-LABEL: lrb: +; CHECK: # %bb.0: +; CHECK-NEXT: th.lrb a0, a0, a1, 0 +; CHECK-NEXT: add a0, a0, a0 +; CHECK-NEXT: ret + %1 = getelementptr i8, ptr %a, iXLen %b %2 = load i8, ptr %1, align 1 - %3 = sext i8 %2 to i64 - %4 = add i64 %3, %3 - ret i64 %4 + %3 = sext i8 %2 to i32 + %4 = add i32 %3, %3 + ret i32 %4 } define i8 @lurb_anyext(ptr %a, i32 %b) { @@ -552,15 +419,11 @@ define i8 @lurb_anyext(ptr %a, i32 %b) { ret i8 %3 } -define i64 @lurb(ptr %a, i32 %b) { +define i32 @lurb(ptr %a, i32 %b) { ; RV32XTHEADMEMIDX-LABEL: lurb: ; RV32XTHEADMEMIDX: # %bb.0: -; RV32XTHEADMEMIDX-NEXT: th.lrb a1, a0, a1, 0 -; RV32XTHEADMEMIDX-NEXT: srai a2, a1, 31 -; RV32XTHEADMEMIDX-NEXT: add a0, a1, a1 -; RV32XTHEADMEMIDX-NEXT: sltu a1, a0, a1 -; RV32XTHEADMEMIDX-NEXT: add a2, a2, a2 -; RV32XTHEADMEMIDX-NEXT: add a1, a2, a1 +; RV32XTHEADMEMIDX-NEXT: th.lrb a0, a0, a1, 0 +; RV32XTHEADMEMIDX-NEXT: add a0, a0, a0 ; RV32XTHEADMEMIDX-NEXT: ret ; ; RV64XTHEADMEMIDX-LABEL: lurb: @@ -571,37 +434,29 @@ define i64 @lurb(ptr %a, i32 %b) { %1 = zext i32 %b to i64 %2 = getelementptr i8, ptr %a, i64 %1 %3 = load i8, ptr %2, align 1 - %4 = sext i8 %3 to i64 - %5 = add i64 %4, %4 - ret i64 %5 -} - -define i64 @lrbu(ptr %a, i64 %b) { -; RV32XTHEADMEMIDX-LABEL: lrbu: -; RV32XTHEADMEMIDX: # %bb.0: -; RV32XTHEADMEMIDX-NEXT: th.lrbu a1, a0, a1, 0 -; RV32XTHEADMEMIDX-NEXT: add a0, a1, a1 -; RV32XTHEADMEMIDX-NEXT: sltu a1, a0, a1 -; RV32XTHEADMEMIDX-NEXT: ret -; -; RV64XTHEADMEMIDX-LABEL: lrbu: -; RV64XTHEADMEMIDX: # %bb.0: -; RV64XTHEADMEMIDX-NEXT: th.lrbu a0, a0, a1, 0 -; 
RV64XTHEADMEMIDX-NEXT: add a0, a0, a0 -; RV64XTHEADMEMIDX-NEXT: ret - %1 = getelementptr i8, ptr %a, i64 %b + %4 = sext i8 %3 to i32 + %5 = add i32 %4, %4 + ret i32 %5 +} + +define i32 @lrbu(ptr %a, iXLen %b) { +; CHECK-LABEL: lrbu: +; CHECK: # %bb.0: +; CHECK-NEXT: th.lrbu a0, a0, a1, 0 +; CHECK-NEXT: add a0, a0, a0 +; CHECK-NEXT: ret + %1 = getelementptr i8, ptr %a, iXLen %b %2 = load i8, ptr %1, align 1 - %3 = zext i8 %2 to i64 - %4 = add i64 %3, %3 - ret i64 %4 + %3 = zext i8 %2 to i32 + %4 = add i32 %3, %3 + ret i32 %4 } -define i64 @lurbu(ptr %a, i32 %b) { +define i32 @lurbu(ptr %a, i32 %b) { ; RV32XTHEADMEMIDX-LABEL: lurbu: ; RV32XTHEADMEMIDX: # %bb.0: -; RV32XTHEADMEMIDX-NEXT: th.lrbu a1, a0, a1, 0 -; RV32XTHEADMEMIDX-NEXT: add a0, a1, a1 -; RV32XTHEADMEMIDX-NEXT: sltu a1, a0, a1 +; RV32XTHEADMEMIDX-NEXT: th.lrbu a0, a0, a1, 0 +; RV32XTHEADMEMIDX-NEXT: add a0, a0, a0 ; RV32XTHEADMEMIDX-NEXT: ret ; ; RV64XTHEADMEMIDX-LABEL: lurbu: @@ -612,47 +467,32 @@ define i64 @lurbu(ptr %a, i32 %b) { %1 = zext i32 %b to i64 %2 = getelementptr i8, ptr %a, i64 %1 %3 = load i8, ptr %2, align 1 - %4 = zext i8 %3 to i64 - %5 = add i64 %4, %4 - ret i64 %5 + %4 = zext i8 %3 to i32 + %5 = add i32 %4, %4 + ret i32 %5 } -define i16 @lrh_anyext(ptr %a, i64 %b) { -; RV32XTHEADMEMIDX-LABEL: lrh_anyext: -; RV32XTHEADMEMIDX: # %bb.0: -; RV32XTHEADMEMIDX-NEXT: th.lrh a0, a0, a1, 1 -; RV32XTHEADMEMIDX-NEXT: ret -; -; RV64XTHEADMEMIDX-LABEL: lrh_anyext: -; RV64XTHEADMEMIDX: # %bb.0: -; RV64XTHEADMEMIDX-NEXT: th.lrh a0, a0, a1, 1 -; RV64XTHEADMEMIDX-NEXT: ret - %1 = getelementptr i16, ptr %a, i64 %b +define i16 @lrh_anyext(ptr %a, iXLen %b) { +; CHECK-LABEL: lrh_anyext: +; CHECK: # %bb.0: +; CHECK-NEXT: th.lrh a0, a0, a1, 1 +; CHECK-NEXT: ret + %1 = getelementptr i16, ptr %a, iXLen %b %2 = load i16, ptr %1, align 2 ret i16 %2 } -define i64 @lrh(ptr %a, i64 %b) { -; RV32XTHEADMEMIDX-LABEL: lrh: -; RV32XTHEADMEMIDX: # %bb.0: -; RV32XTHEADMEMIDX-NEXT: th.lrh a1, a0, a1, 1 -; RV32XTHEADMEMIDX-NEXT: srai a2, a1, 31 -; RV32XTHEADMEMIDX-NEXT: add a0, a1, a1 -; RV32XTHEADMEMIDX-NEXT: sltu a1, a0, a1 -; RV32XTHEADMEMIDX-NEXT: add a2, a2, a2 -; RV32XTHEADMEMIDX-NEXT: add a1, a2, a1 -; RV32XTHEADMEMIDX-NEXT: ret -; -; RV64XTHEADMEMIDX-LABEL: lrh: -; RV64XTHEADMEMIDX: # %bb.0: -; RV64XTHEADMEMIDX-NEXT: th.lrh a0, a0, a1, 1 -; RV64XTHEADMEMIDX-NEXT: add a0, a0, a0 -; RV64XTHEADMEMIDX-NEXT: ret - %1 = getelementptr i16, ptr %a, i64 %b +define i32 @lrh(ptr %a, iXLen %b) { +; CHECK-LABEL: lrh: +; CHECK: # %bb.0: +; CHECK-NEXT: th.lrh a0, a0, a1, 1 +; CHECK-NEXT: add a0, a0, a0 +; CHECK-NEXT: ret + %1 = getelementptr i16, ptr %a, iXLen %b %2 = load i16, ptr %1, align 2 - %3 = sext i16 %2 to i64 - %4 = add i64 %3, %3 - ret i64 %4 + %3 = sext i16 %2 to i32 + %4 = add i32 %3, %3 + ret i32 %4 } define i16 @lurh_anyext(ptr %a, i32 %b) { @@ -671,15 +511,11 @@ define i16 @lurh_anyext(ptr %a, i32 %b) { ret i16 %3 } -define i64 @lurh(ptr %a, i32 %b) { +define i32 @lurh(ptr %a, i32 %b) { ; RV32XTHEADMEMIDX-LABEL: lurh: ; RV32XTHEADMEMIDX: # %bb.0: -; RV32XTHEADMEMIDX-NEXT: th.lrh a1, a0, a1, 1 -; RV32XTHEADMEMIDX-NEXT: srai a2, a1, 31 -; RV32XTHEADMEMIDX-NEXT: add a0, a1, a1 -; RV32XTHEADMEMIDX-NEXT: sltu a1, a0, a1 -; RV32XTHEADMEMIDX-NEXT: add a2, a2, a2 -; RV32XTHEADMEMIDX-NEXT: add a1, a2, a1 +; RV32XTHEADMEMIDX-NEXT: th.lrh a0, a0, a1, 1 +; RV32XTHEADMEMIDX-NEXT: add a0, a0, a0 ; RV32XTHEADMEMIDX-NEXT: ret ; ; RV64XTHEADMEMIDX-LABEL: lurh: @@ -690,37 +526,29 @@ define i64 @lurh(ptr %a, i32 %b) { %1 = zext i32 %b to i64 %2 = 
getelementptr i16, ptr %a, i64 %1 %3 = load i16, ptr %2, align 2 - %4 = sext i16 %3 to i64 - %5 = add i64 %4, %4 - ret i64 %5 -} - -define i64 @lrhu(ptr %a, i64 %b) { -; RV32XTHEADMEMIDX-LABEL: lrhu: -; RV32XTHEADMEMIDX: # %bb.0: -; RV32XTHEADMEMIDX-NEXT: th.lrhu a1, a0, a1, 1 -; RV32XTHEADMEMIDX-NEXT: add a0, a1, a1 -; RV32XTHEADMEMIDX-NEXT: sltu a1, a0, a1 -; RV32XTHEADMEMIDX-NEXT: ret -; -; RV64XTHEADMEMIDX-LABEL: lrhu: -; RV64XTHEADMEMIDX: # %bb.0: -; RV64XTHEADMEMIDX-NEXT: th.lrhu a0, a0, a1, 1 -; RV64XTHEADMEMIDX-NEXT: add a0, a0, a0 -; RV64XTHEADMEMIDX-NEXT: ret - %1 = getelementptr i16, ptr %a, i64 %b + %4 = sext i16 %3 to i32 + %5 = add i32 %4, %4 + ret i32 %5 +} + +define i32 @lrhu(ptr %a, iXLen %b) { +; CHECK-LABEL: lrhu: +; CHECK: # %bb.0: +; CHECK-NEXT: th.lrhu a0, a0, a1, 1 +; CHECK-NEXT: add a0, a0, a0 +; CHECK-NEXT: ret + %1 = getelementptr i16, ptr %a, iXLen %b %2 = load i16, ptr %1, align 2 - %3 = zext i16 %2 to i64 - %4 = add i64 %3, %3 - ret i64 %4 + %3 = zext i16 %2 to i32 + %4 = add i32 %3, %3 + ret i32 %4 } -define i64 @lurhu(ptr %a, i32 %b) { +define i32 @lurhu(ptr %a, i32 %b) { ; RV32XTHEADMEMIDX-LABEL: lurhu: ; RV32XTHEADMEMIDX: # %bb.0: -; RV32XTHEADMEMIDX-NEXT: th.lrhu a1, a0, a1, 1 -; RV32XTHEADMEMIDX-NEXT: add a0, a1, a1 -; RV32XTHEADMEMIDX-NEXT: sltu a1, a0, a1 +; RV32XTHEADMEMIDX-NEXT: th.lrhu a0, a0, a1, 1 +; RV32XTHEADMEMIDX-NEXT: add a0, a0, a0 ; RV32XTHEADMEMIDX-NEXT: ret ; ; RV64XTHEADMEMIDX-LABEL: lurhu: @@ -731,27 +559,22 @@ define i64 @lurhu(ptr %a, i32 %b) { %1 = zext i32 %b to i64 %2 = getelementptr i16, ptr %a, i64 %1 %3 = load i16, ptr %2, align 2 - %4 = zext i16 %3 to i64 - %5 = add i64 %4, %4 - ret i64 %5 + %4 = zext i16 %3 to i32 + %5 = add i32 %4, %4 + ret i32 %5 } -define i32 @lrw_anyext(ptr %a, i64 %b) { -; RV32XTHEADMEMIDX-LABEL: lrw_anyext: -; RV32XTHEADMEMIDX: # %bb.0: -; RV32XTHEADMEMIDX-NEXT: th.lrw a0, a0, a1, 2 -; RV32XTHEADMEMIDX-NEXT: ret -; -; RV64XTHEADMEMIDX-LABEL: lrw_anyext: -; RV64XTHEADMEMIDX: # %bb.0: -; RV64XTHEADMEMIDX-NEXT: th.lrw a0, a0, a1, 2 -; RV64XTHEADMEMIDX-NEXT: ret - %1 = getelementptr i32, ptr %a, i64 %b +define i32 @lrw_anyext(ptr %a, iXLen %b) { +; CHECK-LABEL: lrw_anyext: +; CHECK: # %bb.0: +; CHECK-NEXT: th.lrw a0, a0, a1, 2 +; CHECK-NEXT: ret + %1 = getelementptr i32, ptr %a, iXLen %b %2 = load i32, ptr %1, align 4 ret i32 %2 } -define i64 @lrw(ptr %a, i64 %b) { +define i64 @lrw(ptr %a, iXLen %b) { ; RV32XTHEADMEMIDX-LABEL: lrw: ; RV32XTHEADMEMIDX: # %bb.0: ; RV32XTHEADMEMIDX-NEXT: th.lrw a1, a0, a1, 2 @@ -767,7 +590,7 @@ define i64 @lrw(ptr %a, i64 %b) { ; RV64XTHEADMEMIDX-NEXT: th.lrw a0, a0, a1, 2 ; RV64XTHEADMEMIDX-NEXT: add a0, a0, a0 ; RV64XTHEADMEMIDX-NEXT: ret - %1 = getelementptr i32, ptr %a, i64 %b + %1 = getelementptr i32, ptr %a, iXLen %b %2 = load i32, ptr %1, align 4 %3 = sext i32 %2 to i64 %4 = add i64 %3, %3 @@ -814,7 +637,7 @@ define i64 @lurw(ptr %a, i32 %b) { ret i64 %5 } -define i64 @lrwu(ptr %a, i64 %b) { +define i64 @lrwu(ptr %a, iXLen %b) { ; RV32XTHEADMEMIDX-LABEL: lrwu: ; RV32XTHEADMEMIDX: # %bb.0: ; RV32XTHEADMEMIDX-NEXT: th.lrw a1, a0, a1, 2 @@ -827,7 +650,7 @@ define i64 @lrwu(ptr %a, i64 %b) { ; RV64XTHEADMEMIDX-NEXT: th.lrwu a0, a0, a1, 2 ; RV64XTHEADMEMIDX-NEXT: add a0, a0, a0 ; RV64XTHEADMEMIDX-NEXT: ret - %1 = getelementptr i32, ptr %a, i64 %b + %1 = getelementptr i32, ptr %a, iXLen %b %2 = load i32, ptr %1, align 4 %3 = zext i32 %2 to i64 %4 = add i64 %3, %3 @@ -855,7 +678,7 @@ define i64 @lurwu(ptr %a, i32 %b) { ret i64 %5 } -define i64 @lrd(ptr %a, i64 %b) { +define i64 
@lrd(ptr %a, iXLen %b) { ; RV32XTHEADMEMIDX-LABEL: lrd: ; RV32XTHEADMEMIDX: # %bb.0: ; RV32XTHEADMEMIDX-NEXT: th.lrw a2, a0, a1, 3 @@ -872,13 +695,13 @@ define i64 @lrd(ptr %a, i64 %b) { ; RV64XTHEADMEMIDX-NEXT: th.lrd a0, a0, a1, 3 ; RV64XTHEADMEMIDX-NEXT: add a0, a0, a0 ; RV64XTHEADMEMIDX-NEXT: ret - %1 = getelementptr i64, ptr %a, i64 %b + %1 = getelementptr i64, ptr %a, iXLen %b %2 = load i64, ptr %1, align 8 %3 = add i64 %2, %2 ret i64 %3 } -define i64 @lrd_2(ptr %a, i64 %b) { +define i64 @lrd_2(ptr %a, iXLen %b) { ; RV32XTHEADMEMIDX-LABEL: lrd_2: ; RV32XTHEADMEMIDX: # %bb.0: ; RV32XTHEADMEMIDX-NEXT: addi a2, a0, 96 @@ -897,8 +720,8 @@ define i64 @lrd_2(ptr %a, i64 %b) { ; RV64XTHEADMEMIDX-NEXT: th.lrd a0, a0, a1, 3 ; RV64XTHEADMEMIDX-NEXT: add a0, a0, a0 ; RV64XTHEADMEMIDX-NEXT: ret - %1 = add i64 %b, 12 - %2 = getelementptr i64, ptr %a, i64 %1 + %1 = add iXLen %b, 12 + %2 = getelementptr i64, ptr %a, iXLen %1 %3 = load i64, ptr %2, align 8 %4 = add i64 %3, %3 ret i64 %4 @@ -928,20 +751,14 @@ define i64 @lurd(ptr %a, i32 %b) { ret i64 %4 } -define void @srb(ptr %a, i64 %b, i8 %c) { -; RV32XTHEADMEMIDX-LABEL: srb: -; RV32XTHEADMEMIDX: # %bb.0: -; RV32XTHEADMEMIDX-NEXT: add a3, a3, a3 -; RV32XTHEADMEMIDX-NEXT: th.srb a3, a0, a1, 0 -; RV32XTHEADMEMIDX-NEXT: ret -; -; RV64XTHEADMEMIDX-LABEL: srb: -; RV64XTHEADMEMIDX: # %bb.0: -; RV64XTHEADMEMIDX-NEXT: add a2, a2, a2 -; RV64XTHEADMEMIDX-NEXT: th.srb a2, a0, a1, 0 -; RV64XTHEADMEMIDX-NEXT: ret +define void @srb(ptr %a, iXLen %b, i8 %c) { +; CHECK-LABEL: srb: +; CHECK: # %bb.0: +; CHECK-NEXT: add a2, a2, a2 +; CHECK-NEXT: th.srb a2, a0, a1, 0 +; CHECK-NEXT: ret %1 = add i8 %c, %c - %2 = getelementptr i8, ptr %a, i64 %b + %2 = getelementptr i8, ptr %a, iXLen %b store i8 %1, ptr %2, align 1 ret void } @@ -965,20 +782,14 @@ define void @surb(ptr %a, i32 %b, i8 %c) { ret void } -define void @srh(ptr %a, i64 %b, i16 %c) { -; RV32XTHEADMEMIDX-LABEL: srh: -; RV32XTHEADMEMIDX: # %bb.0: -; RV32XTHEADMEMIDX-NEXT: add a3, a3, a3 -; RV32XTHEADMEMIDX-NEXT: th.srh a3, a0, a1, 1 -; RV32XTHEADMEMIDX-NEXT: ret -; -; RV64XTHEADMEMIDX-LABEL: srh: -; RV64XTHEADMEMIDX: # %bb.0: -; RV64XTHEADMEMIDX-NEXT: add a2, a2, a2 -; RV64XTHEADMEMIDX-NEXT: th.srh a2, a0, a1, 1 -; RV64XTHEADMEMIDX-NEXT: ret +define void @srh(ptr %a, iXLen %b, i16 %c) { +; CHECK-LABEL: srh: +; CHECK: # %bb.0: +; CHECK-NEXT: add a2, a2, a2 +; CHECK-NEXT: th.srh a2, a0, a1, 1 +; CHECK-NEXT: ret %1 = add i16 %c, %c - %2 = getelementptr i16, ptr %a, i64 %b + %2 = getelementptr i16, ptr %a, iXLen %b store i16 %1, ptr %2, align 2 ret void } @@ -1002,20 +813,14 @@ define void @surh(ptr %a, i32 %b, i16 %c) { ret void } -define void @srw(ptr %a, i64 %b, i32 %c) { -; RV32XTHEADMEMIDX-LABEL: srw: -; RV32XTHEADMEMIDX: # %bb.0: -; RV32XTHEADMEMIDX-NEXT: add a3, a3, a3 -; RV32XTHEADMEMIDX-NEXT: th.srw a3, a0, a1, 2 -; RV32XTHEADMEMIDX-NEXT: ret -; -; RV64XTHEADMEMIDX-LABEL: srw: -; RV64XTHEADMEMIDX: # %bb.0: -; RV64XTHEADMEMIDX-NEXT: add a2, a2, a2 -; RV64XTHEADMEMIDX-NEXT: th.srw a2, a0, a1, 2 -; RV64XTHEADMEMIDX-NEXT: ret +define void @srw(ptr %a, iXLen %b, i32 %c) { +; CHECK-LABEL: srw: +; CHECK: # %bb.0: +; CHECK-NEXT: add a2, a2, a2 +; CHECK-NEXT: th.srw a2, a0, a1, 2 +; CHECK-NEXT: ret %1 = add i32 %c, %c - %2 = getelementptr i32, ptr %a, i64 %b + %2 = getelementptr i32, ptr %a, iXLen %b store i32 %1, ptr %2, align 4 ret void } @@ -1039,16 +844,16 @@ define void @surw(ptr %a, i32 %b, i32 %c) { ret void } -define void @srd(ptr %a, i64 %b, i64 %c) { +define void @srd(ptr %a, iXLen %b, i64 %c) { ; 
RV32XTHEADMEMIDX-LABEL: srd:
; RV32XTHEADMEMIDX: # %bb.0:
-; RV32XTHEADMEMIDX-NEXT: add a2, a3, a3
-; RV32XTHEADMEMIDX-NEXT: add a4, a4, a4
-; RV32XTHEADMEMIDX-NEXT: sltu a3, a2, a3
-; RV32XTHEADMEMIDX-NEXT: th.srw a2, a0, a1, 3
-; RV32XTHEADMEMIDX-NEXT: add a3, a4, a3
+; RV32XTHEADMEMIDX-NEXT: add a4, a2, a2
+; RV32XTHEADMEMIDX-NEXT: add a3, a3, a3
+; RV32XTHEADMEMIDX-NEXT: sltu a2, a4, a2
+; RV32XTHEADMEMIDX-NEXT: th.srw a4, a0, a1, 3
+; RV32XTHEADMEMIDX-NEXT: add a2, a3, a2
; RV32XTHEADMEMIDX-NEXT: addi a0, a0, 4
-; RV32XTHEADMEMIDX-NEXT: th.srw a3, a0, a1, 3
+; RV32XTHEADMEMIDX-NEXT: th.srw a2, a0, a1, 3
; RV32XTHEADMEMIDX-NEXT: ret
;
; RV64XTHEADMEMIDX-LABEL: srd:
@@ -1057,7 +862,7 @@ define void @srd(ptr %a, i64 %b, i64 %c) {
; RV64XTHEADMEMIDX-NEXT: th.srd a2, a0, a1, 3
; RV64XTHEADMEMIDX-NEXT: ret
 %1 = add i64 %c, %c
- %2 = getelementptr i64, ptr %a, i64 %b
+ %2 = getelementptr i64, ptr %a, iXLen %b
 store i64 %1, ptr %2, align 8
 ret void
}
@@ -1087,24 +892,18 @@ define void @surd(ptr %a, i32 %b, i64 %c) {
}

define ptr @test_simm5(ptr %base, i32 %a, i32 %b) {
-; RV32XTHEADMEMIDX-LABEL: test_simm5:
-; RV32XTHEADMEMIDX: # %bb.0:
-; RV32XTHEADMEMIDX-NEXT: add a1, a1, a2
-; RV32XTHEADMEMIDX-NEXT: th.swia a1, (a0), -12, 2
-; RV32XTHEADMEMIDX-NEXT: ret
-;
-; RV64XTHEADMEMIDX-LABEL: test_simm5:
-; RV64XTHEADMEMIDX: # %bb.0:
-; RV64XTHEADMEMIDX-NEXT: add a1, a1, a2
-; RV64XTHEADMEMIDX-NEXT: th.swia a1, (a0), -12, 2
-; RV64XTHEADMEMIDX-NEXT: ret
+; CHECK-LABEL: test_simm5:
+; CHECK: # %bb.0:
+; CHECK-NEXT: add a1, a1, a2
+; CHECK-NEXT: th.swia a1, (a0), -12, 2
+; CHECK-NEXT: ret
 %addr.1 = getelementptr i32, ptr %base, i32 -12
 %res = add i32 %a, %b
 store i32 %res, ptr %base
 ret ptr %addr.1
}

-define i64 @lrd_large_shift(ptr %a, i64 %b) {
+define i64 @lrd_large_shift(ptr %a, iXLen %b) {
; RV32XTHEADMEMIDX-LABEL: lrd_large_shift:
; RV32XTHEADMEMIDX: # %bb.0:
; RV32XTHEADMEMIDX-NEXT: slli a1, a1, 5
@@ -1119,14 +918,14 @@ define i64 @lrd_large_shift(ptr %a, i64 %b) {
; RV64XTHEADMEMIDX-NEXT: add a0, a1, a0
; RV64XTHEADMEMIDX-NEXT: ld a0, 384(a0)
; RV64XTHEADMEMIDX-NEXT: ret
- %1 = add i64 %b, 12
- %2 = shl i64 %1, 2
- %3 = getelementptr i64, ptr %a, i64 %2
+ %1 = add iXLen %b, 12
+ %2 = shl iXLen %1, 2
+ %3 = getelementptr i64, ptr %a, iXLen %2
 %4 = load i64, ptr %3, align 8
 ret i64 %4
}

-define i64 @lrd_large_offset(ptr %a, i64 %b) {
+define i64 @lrd_large_offset(ptr %a, iXLen %b) {
; RV32XTHEADMEMIDX-LABEL: lrd_large_offset:
; RV32XTHEADMEMIDX: # %bb.0:
; RV32XTHEADMEMIDX-NEXT: slli a1, a1, 3
@@ -1145,8 +944,8 @@ define i64 @lrd_large_offset(ptr %a, i64 %b) {
; RV64XTHEADMEMIDX-NEXT: add a0, a0, a1
; RV64XTHEADMEMIDX-NEXT: ld a0, 1792(a0)
; RV64XTHEADMEMIDX-NEXT: ret
- %1 = add i64 %b, 12000
- %2 = getelementptr i64, ptr %a, i64 %1
+ %1 = add iXLen %b, 12000
+ %2 = getelementptr i64, ptr %a, iXLen %1
 %3 = load i64, ptr %2, align 8
 ret i64 %3
}
diff --git a/llvm/test/CodeGen/SPARC/tls-sp.ll b/llvm/test/CodeGen/SPARC/tls-sp.ll
new file mode 100644
index 0000000..de9af01
--- /dev/null
+++ b/llvm/test/CodeGen/SPARC/tls-sp.ll
@@ -0,0 +1,105 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=sparc -relocation-model=pic < %s | FileCheck --check-prefix=SPARC %s
+; RUN: llc -mtriple=sparc64 -relocation-model=pic < %s | FileCheck --check-prefix=SPARC64 %s
+
+@x = external thread_local global i8
+
+;; Test that we don't over-allocate stack space when calling __tls_get_addr
+;; and the call frame pseudos can be eliminated.
+define ptr @no_alloca() nounwind { +; SPARC-LABEL: no_alloca: +; SPARC: ! %bb.0: ! %entry +; SPARC-NEXT: save %sp, -96, %sp +; SPARC-NEXT: .Ltmp0: +; SPARC-NEXT: call .Ltmp1 +; SPARC-NEXT: .Ltmp2: +; SPARC-NEXT: sethi %hi(_GLOBAL_OFFSET_TABLE_+(.Ltmp2-.Ltmp0)), %i0 +; SPARC-NEXT: .Ltmp1: +; SPARC-NEXT: or %i0, %lo(_GLOBAL_OFFSET_TABLE_+(.Ltmp1-.Ltmp0)), %i0 +; SPARC-NEXT: add %i0, %o7, %i0 +; SPARC-NEXT: sethi %tgd_hi22(x), %i1 +; SPARC-NEXT: add %i1, %tgd_lo10(x), %i1 +; SPARC-NEXT: add %i0, %i1, %o0, %tgd_add(x) +; SPARC-NEXT: call __tls_get_addr, %tgd_call(x) +; SPARC-NEXT: nop +; SPARC-NEXT: ret +; SPARC-NEXT: restore %g0, %o0, %o0 +; +; SPARC64-LABEL: no_alloca: +; SPARC64: ! %bb.0: ! %entry +; SPARC64-NEXT: save %sp, -128, %sp +; SPARC64-NEXT: .Ltmp0: +; SPARC64-NEXT: rd %pc, %o7 +; SPARC64-NEXT: .Ltmp2: +; SPARC64-NEXT: sethi %hi(_GLOBAL_OFFSET_TABLE_+(.Ltmp2-.Ltmp0)), %i0 +; SPARC64-NEXT: .Ltmp1: +; SPARC64-NEXT: or %i0, %lo(_GLOBAL_OFFSET_TABLE_+(.Ltmp1-.Ltmp0)), %i0 +; SPARC64-NEXT: add %i0, %o7, %i0 +; SPARC64-NEXT: sethi %tgd_hi22(x), %i1 +; SPARC64-NEXT: add %i1, %tgd_lo10(x), %i1 +; SPARC64-NEXT: add %i0, %i1, %o0, %tgd_add(x) +; SPARC64-NEXT: call __tls_get_addr, %tgd_call(x) +; SPARC64-NEXT: nop +; SPARC64-NEXT: ret +; SPARC64-NEXT: restore %g0, %o0, %o0 +entry: + %0 = call ptr @llvm.threadlocal.address.p0(ptr @x) + ret ptr %0 +} + +;; Test that %sp is valid for the call to __tls_get_addr. We store to a dynamic +;; alloca in order to prevent eliminating any call frame pseudos from the call. +define ptr @dynamic_alloca(i64 %n) nounwind { +; SPARC-LABEL: dynamic_alloca: +; SPARC: ! %bb.0: ! %entry +; SPARC-NEXT: save %sp, -96, %sp +; SPARC-NEXT: .Ltmp3: +; SPARC-NEXT: call .Ltmp4 +; SPARC-NEXT: .Ltmp5: +; SPARC-NEXT: sethi %hi(_GLOBAL_OFFSET_TABLE_+(.Ltmp5-.Ltmp3)), %i0 +; SPARC-NEXT: .Ltmp4: +; SPARC-NEXT: or %i0, %lo(_GLOBAL_OFFSET_TABLE_+(.Ltmp4-.Ltmp3)), %i0 +; SPARC-NEXT: add %i0, %o7, %i0 +; SPARC-NEXT: sethi %tgd_hi22(x), %i2 +; SPARC-NEXT: add %i2, %tgd_lo10(x), %i2 +; SPARC-NEXT: add %i0, %i2, %o0, %tgd_add(x) +; SPARC-NEXT: call __tls_get_addr, %tgd_call(x) +; SPARC-NEXT: nop +; SPARC-NEXT: add %i1, 7, %i0 +; SPARC-NEXT: and %i0, -8, %i0 +; SPARC-NEXT: sub %sp, %i0, %i0 +; SPARC-NEXT: add %i0, -8, %sp +; SPARC-NEXT: mov 1, %i1 +; SPARC-NEXT: stb %i1, [%i0+88] +; SPARC-NEXT: ret +; SPARC-NEXT: restore %g0, %o0, %o0 +; +; SPARC64-LABEL: dynamic_alloca: +; SPARC64: ! %bb.0: ! 
%entry +; SPARC64-NEXT: save %sp, -128, %sp +; SPARC64-NEXT: .Ltmp3: +; SPARC64-NEXT: rd %pc, %o7 +; SPARC64-NEXT: .Ltmp5: +; SPARC64-NEXT: sethi %hi(_GLOBAL_OFFSET_TABLE_+(.Ltmp5-.Ltmp3)), %i1 +; SPARC64-NEXT: .Ltmp4: +; SPARC64-NEXT: or %i1, %lo(_GLOBAL_OFFSET_TABLE_+(.Ltmp4-.Ltmp3)), %i1 +; SPARC64-NEXT: add %i1, %o7, %i1 +; SPARC64-NEXT: sethi %tgd_hi22(x), %i2 +; SPARC64-NEXT: add %i2, %tgd_lo10(x), %i2 +; SPARC64-NEXT: add %i1, %i2, %o0, %tgd_add(x) +; SPARC64-NEXT: call __tls_get_addr, %tgd_call(x) +; SPARC64-NEXT: nop +; SPARC64-NEXT: add %i0, 15, %i0 +; SPARC64-NEXT: and %i0, -16, %i0 +; SPARC64-NEXT: sub %sp, %i0, %i0 +; SPARC64-NEXT: mov %i0, %sp +; SPARC64-NEXT: mov 1, %i1 +; SPARC64-NEXT: stb %i1, [%i0+2175] +; SPARC64-NEXT: ret +; SPARC64-NEXT: restore %g0, %o0, %o0 +entry: + %0 = call ptr @llvm.threadlocal.address.p0(ptr @x) + %1 = alloca i8, i64 %n + store i8 1, ptr %1 + ret ptr %0 +} diff --git a/llvm/test/CodeGen/SystemZ/pr60413.ll b/llvm/test/CodeGen/SystemZ/pr60413.ll index bbf4d50..8a6a303 100644 --- a/llvm/test/CodeGen/SystemZ/pr60413.ll +++ b/llvm/test/CodeGen/SystemZ/pr60413.ll @@ -16,31 +16,31 @@ define dso_local void @m() local_unnamed_addr #1 { ; CHECK-NEXT: stmg %r13, %r15, 104(%r15) ; CHECK-NEXT: aghi %r15, -168 ; CHECK-NEXT: lhrl %r1, f+4 +; CHECK-NEXT: sll %r1, 8 ; CHECK-NEXT: larl %r2, f -; CHECK-NEXT: llc %r2, 6(%r2) -; CHECK-NEXT: larl %r3, e -; CHECK-NEXT: lb %r0, 3(%r3) -; CHECK-NEXT: rosbg %r2, %r1, 32, 55, 8 -; CHECK-NEXT: vlvgp %v0, %r2, %r0 -; CHECK-NEXT: vlvgf %v0, %r2, 0 -; CHECK-NEXT: vlvgf %v0, %r2, 2 -; CHECK-NEXT: vlvgp %v1, %r0, %r2 -; CHECK-NEXT: vlvgp %v2, %r2, %r2 -; CHECK-NEXT: lr %r1, %r2 +; CHECK-NEXT: ic %r1, 6(%r2) +; CHECK-NEXT: larl %r2, e +; CHECK-NEXT: lb %r0, 3(%r2) +; CHECK-NEXT: vlvgp %v0, %r0, %r1 +; CHECK-NEXT: vlvgp %v1, %r1, %r0 +; CHECK-NEXT: vlvgf %v1, %r1, 0 +; CHECK-NEXT: vlvgf %v1, %r1, 2 +; CHECK-NEXT: vlvgp %v2, %r1, %r1 +; CHECK-NEXT: # kill: def $r1l killed $r1l killed $r1d ; CHECK-NEXT: nilh %r1, 255 ; CHECK-NEXT: chi %r1, 128 ; CHECK-NEXT: ipm %r1 ; CHECK-NEXT: risbg %r1, %r1, 63, 191, 36 +; CHECK-NEXT: vlvgf %v0, %r0, 0 +; CHECK-NEXT: vlvgf %v0, %r0, 2 ; CHECK-NEXT: vgbm %v3, 30583 ; CHECK-NEXT: vn %v0, %v0, %v3 -; CHECK-NEXT: vlvgf %v1, %r0, 0 -; CHECK-NEXT: vlvgf %v1, %r0, 2 ; CHECK-NEXT: vn %v1, %v1, %v3 ; CHECK-NEXT: vrepf %v2, %v2, 1 ; CHECK-NEXT: vn %v2, %v2, %v3 ; CHECK-NEXT: vrepif %v3, 127 -; CHECK-NEXT: vchlf %v0, %v0, %v3 -; CHECK-NEXT: vlgvf %r13, %v0, 0 +; CHECK-NEXT: vchlf %v1, %v1, %v3 +; CHECK-NEXT: vlgvf %r13, %v1, 0 ; CHECK-NEXT: vchlf %v2, %v2, %v3 ; CHECK-NEXT: vlgvf %r3, %v2, 1 ; CHECK-NEXT: nilf %r3, 1 @@ -54,13 +54,13 @@ define dso_local void @m() local_unnamed_addr #1 { ; CHECK-NEXT: nilf %r14, 1 ; CHECK-NEXT: rosbg %r2, %r14, 32, 51, 12 ; CHECK-NEXT: rosbg %r2, %r13, 52, 52, 11 -; CHECK-NEXT: vlgvf %r13, %v0, 1 +; CHECK-NEXT: vlgvf %r13, %v1, 1 ; CHECK-NEXT: rosbg %r2, %r13, 53, 53, 10 -; CHECK-NEXT: vlgvf %r13, %v0, 2 +; CHECK-NEXT: vlgvf %r13, %v1, 2 ; CHECK-NEXT: rosbg %r2, %r13, 54, 54, 9 -; CHECK-NEXT: vlgvf %r13, %v0, 3 +; CHECK-NEXT: vlgvf %r13, %v1, 3 ; CHECK-NEXT: rosbg %r2, %r13, 55, 55, 8 -; CHECK-NEXT: vchlf %v0, %v1, %v3 +; CHECK-NEXT: vchlf %v0, %v0, %v3 ; CHECK-NEXT: vlgvf %r13, %v0, 0 ; CHECK-NEXT: rosbg %r2, %r13, 56, 56, 7 ; CHECK-NEXT: vlgvf %r13, %v0, 1 diff --git a/llvm/test/CodeGen/WebAssembly/memory-interleave.ll b/llvm/test/CodeGen/WebAssembly/memory-interleave.ll new file mode 100644 index 0000000..97c2311 --- /dev/null +++ 
b/llvm/test/CodeGen/WebAssembly/memory-interleave.ll @@ -0,0 +1,1413 @@ +; RUN: opt -S -mattr=+simd128 -passes=loop-vectorize %s | llc -mtriple=wasm32 -asm-verbose=false -disable-wasm-fallthrough-return-opt -wasm-keep-registers | FileCheck %s + +target datalayout = "e-m:e-p:32:32-p10:8:8-p20:8:8-i64:64-n32:64-S128-ni:1:10:20" + +%struct.TwoInts = type { i32, i32 } +%struct.ThreeInts = type { i32, i32, i32 } +%struct.FourInts = type { i32, i32, i32, i32 } +%struct.ThreeShorts = type { i16, i16, i16 } +%struct.FourShorts = type { i16, i16, i16, i16 } +%struct.FiveShorts = type { i16, i16, i16, i16, i16 } +%struct.TwoBytes = type { i8, i8 } +%struct.ThreeBytes = type { i8, i8, i8 } +%struct.FourBytes = type { i8, i8, i8, i8 } +%struct.EightBytes = type { i8, i8, i8, i8, i8, i8, i8, i8 } + +; CHECK-LABEL: two_ints_same_op: +; CHECK: loop +; CHECK: i32.load +; CHECK: i32.load +; CHECK: i32.add +; CHECK: i32.store +; CHECK: i32.load +; CHECK: i32.load +; CHECK: i32.add +; CHECK: i32.store +define hidden void @two_ints_same_op(ptr noalias nocapture noundef writeonly %0, ptr nocapture noundef readonly %1, ptr nocapture noundef readonly %2, i32 noundef %3) { + %5 = icmp eq i32 %3, 0 + br i1 %5, label %6, label %7 + +6: ; preds = %7, %4 + ret void + +7: ; preds = %4, %7 + %8 = phi i32 [ %21, %7 ], [ 0, %4 ] + %9 = getelementptr inbounds %struct.TwoInts, ptr %1, i32 %8 + %10 = load i32, ptr %9, align 4 + %11 = getelementptr inbounds %struct.TwoInts, ptr %2, i32 %8 + %12 = load i32, ptr %11, align 4 + %13 = add i32 %12, %10 + %14 = getelementptr inbounds %struct.TwoInts, ptr %0, i32 %8 + store i32 %13, ptr %14, align 4 + %15 = getelementptr inbounds i8, ptr %9, i32 4 + %16 = load i32, ptr %15, align 4 + %17 = getelementptr inbounds i8, ptr %11, i32 4 + %18 = load i32, ptr %17, align 4 + %19 = add i32 %18, %16 + %20 = getelementptr inbounds i8, ptr %14, i32 4 + store i32 %19, ptr %20, align 4 + %21 = add nuw i32 %8, 1 + %22 = icmp eq i32 %21, %3 + br i1 %22, label %6, label %7 +} + +; CHECK-LABEL: two_ints_vary_op: +; CHECK: loop +; CHECK: i32.load +; CHECK: i32.load +; CHECK: i32.add +; CHECK: i32.store +; CHECK: i32.load +; CHECK: i32.load +; CHECK: i32.sub +; CHECK: i32.store +define hidden void @two_ints_vary_op(ptr noalias nocapture noundef writeonly %0, ptr nocapture noundef readonly %1, ptr nocapture noundef readonly %2, i32 noundef %3) { + %5 = icmp eq i32 %3, 0 + br i1 %5, label %6, label %7 + +6: ; preds = %7, %4 + ret void + +7: ; preds = %4, %7 + %8 = phi i32 [ %21, %7 ], [ 0, %4 ] + %9 = getelementptr inbounds %struct.TwoInts, ptr %1, i32 %8 + %10 = load i32, ptr %9, align 4 + %11 = getelementptr inbounds %struct.TwoInts, ptr %2, i32 %8 + %12 = load i32, ptr %11, align 4 + %13 = add i32 %12, %10 + %14 = getelementptr inbounds %struct.TwoInts, ptr %0, i32 %8 + store i32 %13, ptr %14, align 4 + %15 = getelementptr inbounds i8, ptr %9, i32 4 + %16 = load i32, ptr %15, align 4 + %17 = getelementptr inbounds i8, ptr %11, i32 4 + %18 = load i32, ptr %17, align 4 + %19 = sub i32 %16, %18 + %20 = getelementptr inbounds i8, ptr %14, i32 4 + store i32 %19, ptr %20, align 4 + %21 = add nuw i32 %8, 1 + %22 = icmp eq i32 %21, %3 + br i1 %22, label %6, label %7 +} + +; CHECK-LABEL: three_ints: +; CHECK: loop +; CHECK: i32.load +; CHECK: i32.load +; CHECK: i32.add +; CHECK: i32.store +; CHECK: i32.load +; CHECK: i32.load +; CHECK: i32.add +; CHECK: i32.store +; CHECK: i32.load +; CHECK: i32.load +; CHECK: i32.add +; CHECK: i32.store +define hidden void @three_ints(ptr noalias nocapture noundef writeonly 
%0, ptr nocapture noundef readonly %1, ptr nocapture noundef readonly %2, i32 noundef %3) { + %5 = icmp eq i32 %3, 0 + br i1 %5, label %6, label %7 + +6: ; preds = %7, %4 + ret void + +7: ; preds = %4, %7 + %8 = phi i32 [ %27, %7 ], [ 0, %4 ] + %9 = getelementptr inbounds %struct.ThreeInts, ptr %1, i32 %8 + %10 = load i32, ptr %9, align 4 + %11 = getelementptr inbounds %struct.ThreeInts, ptr %2, i32 %8 + %12 = load i32, ptr %11, align 4 + %13 = add nsw i32 %12, %10 + %14 = getelementptr inbounds %struct.ThreeInts, ptr %0, i32 %8 + store i32 %13, ptr %14, align 4 + %15 = getelementptr inbounds i8, ptr %9, i32 4 + %16 = load i32, ptr %15, align 4 + %17 = getelementptr inbounds i8, ptr %11, i32 4 + %18 = load i32, ptr %17, align 4 + %19 = add nsw i32 %18, %16 + %20 = getelementptr inbounds i8, ptr %14, i32 4 + store i32 %19, ptr %20, align 4 + %21 = getelementptr inbounds i8, ptr %9, i32 8 + %22 = load i32, ptr %21, align 4 + %23 = getelementptr inbounds i8, ptr %11, i32 8 + %24 = load i32, ptr %23, align 4 + %25 = add nsw i32 %24, %22 + %26 = getelementptr inbounds i8, ptr %14, i32 8 + store i32 %25, ptr %26, align 4 + %27 = add nuw i32 %8, 1 + %28 = icmp eq i32 %27, %3 + br i1 %28, label %6, label %7 +} + +; CHECK-LABEL: three_shorts: +; CHECK: loop +; CHECK: i32.load16_u +; CHECK: i32.load16_u +; CHECK: i32.mul +; CHECK: i32.store16 +; CHECK: i32.load16_u +; CHECK: i32.load16_u +; CHECK: i32.mul +; CHECK: i32.store16 +; CHECK: i32.load16_u +; CHECK: i32.load16_u +; CHECK: i32.mul +; CHECK: i32.store16 +define hidden void @three_shorts(ptr noalias nocapture noundef writeonly %0, ptr nocapture noundef readonly %1, ptr nocapture noundef readonly %2, i32 noundef %3) { + %5 = icmp eq i32 %3, 0 + br i1 %5, label %6, label %7 + +6: ; preds = %7, %4 + ret void + +7: ; preds = %4, %7 + %8 = phi i32 [ %27, %7 ], [ 0, %4 ] + %9 = getelementptr inbounds %struct.ThreeShorts, ptr %1, i32 %8 + %10 = load i16, ptr %9, align 2 + %11 = getelementptr inbounds %struct.ThreeShorts, ptr %2, i32 %8 + %12 = load i16, ptr %11, align 2 + %13 = mul i16 %12, %10 + %14 = getelementptr inbounds %struct.ThreeShorts, ptr %0, i32 %8 + store i16 %13, ptr %14, align 2 + %15 = getelementptr inbounds i8, ptr %9, i32 2 + %16 = load i16, ptr %15, align 2 + %17 = getelementptr inbounds i8, ptr %11, i32 2 + %18 = load i16, ptr %17, align 2 + %19 = mul i16 %18, %16 + %20 = getelementptr inbounds i8, ptr %14, i32 2 + store i16 %19, ptr %20, align 2 + %21 = getelementptr inbounds i8, ptr %9, i32 4 + %22 = load i16, ptr %21, align 2 + %23 = getelementptr inbounds i8, ptr %11, i32 4 + %24 = load i16, ptr %23, align 2 + %25 = mul i16 %24, %22 + %26 = getelementptr inbounds i8, ptr %14, i32 4 + store i16 %25, ptr %26, align 2 + %27 = add nuw i32 %8, 1 + %28 = icmp eq i32 %27, %3 + br i1 %28, label %6, label %7 +} + +; CHECK-LABEL: four_shorts_same_op: +; CHECK: loop +; CHECK: i32.load16_u +; CHECK: i32.load16_u +; CHECK: i32.sub +; CHECK: i32.store16 +; CHECK: i32.load16_u +; CHECK: i32.load16_u +; CHECK: i32.sub +; CHECK: i32.store16 +; CHECK: i32.load16_u +; CHECK: i32.load16_u +; CHECK: i32.sub +; CHECK: i32.store16 +; CHECK: i32.load16_u +; CHECK: i32.load16_u +; CHECK: i32.sub +; CHECK: i32.store16 +define hidden void @four_shorts_same_op(ptr noalias nocapture noundef writeonly %0, ptr nocapture noundef readonly %1, ptr nocapture noundef readonly %2, i32 noundef %3) { + %5 = icmp eq i32 %3, 0 + br i1 %5, label %6, label %7 + +6: ; preds = %7, %4 + ret void + +7: ; preds = %4, %7 + %8 = phi i32 [ %33, %7 ], [ 0, %4 ] + %9 = 
getelementptr inbounds %struct.FourShorts, ptr %1, i32 %8 + %10 = load i16, ptr %9, align 2 + %11 = getelementptr inbounds %struct.FourShorts, ptr %2, i32 %8 + %12 = load i16, ptr %11, align 2 + %13 = sub i16 %10, %12 + %14 = getelementptr inbounds %struct.FourShorts, ptr %0, i32 %8 + store i16 %13, ptr %14, align 2 + %15 = getelementptr inbounds i8, ptr %9, i32 2 + %16 = load i16, ptr %15, align 2 + %17 = getelementptr inbounds i8, ptr %11, i32 2 + %18 = load i16, ptr %17, align 2 + %19 = sub i16 %16, %18 + %20 = getelementptr inbounds i8, ptr %14, i32 2 + store i16 %19, ptr %20, align 2 + %21 = getelementptr inbounds i8, ptr %9, i32 4 + %22 = load i16, ptr %21, align 2 + %23 = getelementptr inbounds i8, ptr %11, i32 4 + %24 = load i16, ptr %23, align 2 + %25 = sub i16 %22, %24 + %26 = getelementptr inbounds i8, ptr %14, i32 4 + store i16 %25, ptr %26, align 2 + %27 = getelementptr inbounds i8, ptr %9, i32 6 + %28 = load i16, ptr %27, align 2 + %29 = getelementptr inbounds i8, ptr %11, i32 6 + %30 = load i16, ptr %29, align 2 + %31 = sub i16 %28, %30 + %32 = getelementptr inbounds i8, ptr %14, i32 6 + store i16 %31, ptr %32, align 2 + %33 = add nuw i32 %8, 1 + %34 = icmp eq i32 %33, %3 + br i1 %34, label %6, label %7 +} + +; CHECK-LABEL: four_shorts_split_op: +; CHECK: loop +; CHECK: i32.load16_u +; CHECK: i32.load16_u +; CHECK: i32.or +; CHECK: i32.store16 +; CHECK: i32.load16_u +; CHECK: i32.load16_u +; CHECK: i32.or +; CHECK: i32.store16 +; CHECK: i32.load16_u +; CHECK: i32.load16_u +; CHECK: i32.xor +; CHECK: i32.store16 +; CHECK: i32.load16_u +; CHECK: i32.load16_u +; CHECK: i32.xor +; CHECK: i32.store16 +define hidden void @four_shorts_split_op(ptr noalias nocapture noundef writeonly %0, ptr nocapture noundef readonly %1, ptr nocapture noundef readonly %2, i32 noundef %3) { + %5 = icmp eq i32 %3, 0 + br i1 %5, label %6, label %7 + +6: ; preds = %7, %4 + ret void + +7: ; preds = %4, %7 + %8 = phi i32 [ %33, %7 ], [ 0, %4 ] + %9 = getelementptr inbounds %struct.FourShorts, ptr %1, i32 %8 + %10 = load i16, ptr %9, align 2 + %11 = getelementptr inbounds %struct.FourShorts, ptr %2, i32 %8 + %12 = load i16, ptr %11, align 2 + %13 = or i16 %12, %10 + %14 = getelementptr inbounds %struct.FourShorts, ptr %0, i32 %8 + store i16 %13, ptr %14, align 2 + %15 = getelementptr inbounds i8, ptr %9, i32 2 + %16 = load i16, ptr %15, align 2 + %17 = getelementptr inbounds i8, ptr %11, i32 2 + %18 = load i16, ptr %17, align 2 + %19 = or i16 %18, %16 + %20 = getelementptr inbounds i8, ptr %14, i32 2 + store i16 %19, ptr %20, align 2 + %21 = getelementptr inbounds i8, ptr %9, i32 4 + %22 = load i16, ptr %21, align 2 + %23 = getelementptr inbounds i8, ptr %11, i32 4 + %24 = load i16, ptr %23, align 2 + %25 = xor i16 %24, %22 + %26 = getelementptr inbounds i8, ptr %14, i32 4 + store i16 %25, ptr %26, align 2 + %27 = getelementptr inbounds i8, ptr %9, i32 6 + %28 = load i16, ptr %27, align 2 + %29 = getelementptr inbounds i8, ptr %11, i32 6 + %30 = load i16, ptr %29, align 2 + %31 = xor i16 %30, %28 + %32 = getelementptr inbounds i8, ptr %14, i32 6 + store i16 %31, ptr %32, align 2 + %33 = add nuw i32 %8, 1 + %34 = icmp eq i32 %33, %3 + br i1 %34, label %6, label %7 +} + +; CHECK-LABEL: four_shorts_interleave_op: +; CHECK: loop +; CHECK: i32.load16_u +; CHECK: i32.load16_u +; CHECK: i32.or +; CHECK: i32.store16 +; CHECK: i32.load16_u +; CHECK: i32.load16_u +; CHECK: i32.xor +; CHECK: i32.store16 +; CHECK: i32.load16_u +; CHECK: i32.load16_u +; CHECK: i32.or +; CHECK: i32.store16 +; CHECK: i32.load16_u +; 
CHECK: i32.load16_u +; CHECK: i32.xor +; CHECK: i32.store16 +define hidden void @four_shorts_interleave_op(ptr noalias nocapture noundef writeonly %0, ptr nocapture noundef readonly %1, ptr nocapture noundef readonly %2, i32 noundef %3) { + %5 = icmp eq i32 %3, 0 + br i1 %5, label %6, label %7 + +6: ; preds = %7, %4 + ret void + +7: ; preds = %4, %7 + %8 = phi i32 [ %33, %7 ], [ 0, %4 ] + %9 = getelementptr inbounds %struct.FourShorts, ptr %1, i32 %8 + %10 = load i16, ptr %9, align 2 + %11 = getelementptr inbounds %struct.FourShorts, ptr %2, i32 %8 + %12 = load i16, ptr %11, align 2 + %13 = or i16 %12, %10 + %14 = getelementptr inbounds %struct.FourShorts, ptr %0, i32 %8 + store i16 %13, ptr %14, align 2 + %15 = getelementptr inbounds i8, ptr %9, i32 2 + %16 = load i16, ptr %15, align 2 + %17 = getelementptr inbounds i8, ptr %11, i32 2 + %18 = load i16, ptr %17, align 2 + %19 = xor i16 %18, %16 + %20 = getelementptr inbounds i8, ptr %14, i32 2 + store i16 %19, ptr %20, align 2 + %21 = getelementptr inbounds i8, ptr %9, i32 4 + %22 = load i16, ptr %21, align 2 + %23 = getelementptr inbounds i8, ptr %11, i32 4 + %24 = load i16, ptr %23, align 2 + %25 = or i16 %24, %22 + %26 = getelementptr inbounds i8, ptr %14, i32 4 + store i16 %25, ptr %26, align 2 + %27 = getelementptr inbounds i8, ptr %9, i32 6 + %28 = load i16, ptr %27, align 2 + %29 = getelementptr inbounds i8, ptr %11, i32 6 + %30 = load i16, ptr %29, align 2 + %31 = xor i16 %30, %28 + %32 = getelementptr inbounds i8, ptr %14, i32 6 + store i16 %31, ptr %32, align 2 + %33 = add nuw i32 %8, 1 + %34 = icmp eq i32 %33, %3 + br i1 %34, label %6, label %7 +} + +; CHECK-LABEL: five_shorts: +; CHECK: loop +; CHECK: i32.load16_u +; CHECK: i32.load16_u +; CHECK: i32.sub +; CHECK: i32.store16 +; CHECK: i32.load16_u +; CHECK: i32.load16_u +; CHECK: i32.sub +; CHECK: i32.store16 +; CHECK: i32.load16_u +; CHECK: i32.load16_u +; CHECK: i32.sub +; CHECK: i32.store16 +; CHECK: i32.load16_u +; CHECK: i32.load16_u +; CHECK: i32.sub +; CHECK: i32.store16 +; CHECK: i32.load16_u +; CHECK: i32.load16_u +; CHECK: i32.sub +; CHECK: i32.store16 +define hidden void @five_shorts(ptr noalias nocapture noundef writeonly %0, ptr nocapture noundef readonly %1, ptr nocapture noundef readonly %2, i32 noundef %3) { + %5 = icmp eq i32 %3, 0 + br i1 %5, label %6, label %7 + +6: ; preds = %7, %4 + ret void + +7: ; preds = %4, %7 + %8 = phi i32 [ %39, %7 ], [ 0, %4 ] + %9 = getelementptr inbounds %struct.FiveShorts, ptr %1, i32 %8 + %10 = load i16, ptr %9, align 1 + %11 = getelementptr inbounds %struct.FiveShorts, ptr %2, i32 %8 + %12 = load i16, ptr %11, align 1 + %13 = sub i16 %10, %12 + %14 = getelementptr inbounds %struct.FiveShorts, ptr %0, i32 %8 + store i16 %13, ptr %14, align 1 + %15 = getelementptr inbounds i16, ptr %9, i32 1 + %16 = load i16, ptr %15, align 1 + %17 = getelementptr inbounds i16, ptr %11, i32 1 + %18 = load i16, ptr %17, align 1 + %19 = sub i16 %16, %18 + %20 = getelementptr inbounds i16, ptr %14, i32 1 + store i16 %19, ptr %20, align 1 + %21 = getelementptr inbounds i16, ptr %9, i32 2 + %22 = load i16, ptr %21, align 1 + %23 = getelementptr inbounds i16, ptr %11, i32 2 + %24 = load i16, ptr %23, align 1 + %25 = sub i16 %22, %24 + %26 = getelementptr inbounds i16, ptr %14, i32 2 + store i16 %25, ptr %26, align 1 + %27 = getelementptr inbounds i16, ptr %9, i32 3 + %28 = load i16, ptr %27, align 1 + %29 = getelementptr inbounds i16, ptr %11, i32 3 + %30 = load i16, ptr %29, align 1 + %31 = sub i16 %28, %30 + %32 = getelementptr inbounds i16, ptr 
%14, i32 3 + store i16 %31, ptr %32, align 1 + %33 = getelementptr inbounds i16, ptr %9, i32 4 + %34 = load i16, ptr %33, align 1 + %35 = getelementptr inbounds i16, ptr %11, i32 4 + %36 = load i16, ptr %35, align 1 + %37 = sub i16 %34, %36 + %38 = getelementptr inbounds i16, ptr %14, i32 4 + store i16 %37, ptr %38, align 1 + %39 = add nuw i32 %8, 1 + %40 = icmp eq i32 %39, %3 + br i1 %40, label %6, label %7 +} + +; CHECK-LABEL: two_bytes_same_op: +; CHECK: loop +; CHECK: i32.load8_u +; CHECK: i32.load8_u +; CHECK: i32.mul +; CHECK: i32.store8 +; CHECK: i32.load8_u +; CHECK: i32.load8_u +; CHECK: i32.mul +; CHECK: i32.store8 +define hidden void @two_bytes_same_op(ptr noalias nocapture noundef writeonly %0, ptr nocapture noundef readonly %1, ptr nocapture noundef readonly %2, i32 noundef %3) { + %5 = icmp eq i32 %3, 0 + br i1 %5, label %6, label %7 + +6: ; preds = %7, %4 + ret void + +7: ; preds = %4, %7 + %8 = phi i32 [ %21, %7 ], [ 0, %4 ] + %9 = getelementptr inbounds %struct.TwoBytes, ptr %1, i32 %8 + %10 = load i8, ptr %9, align 1 + %11 = getelementptr inbounds %struct.TwoBytes, ptr %2, i32 %8 + %12 = load i8, ptr %11, align 1 + %13 = mul i8 %12, %10 + %14 = getelementptr inbounds %struct.TwoBytes, ptr %0, i32 %8 + store i8 %13, ptr %14, align 1 + %15 = getelementptr inbounds i8, ptr %9, i32 1 + %16 = load i8, ptr %15, align 1 + %17 = getelementptr inbounds i8, ptr %11, i32 1 + %18 = load i8, ptr %17, align 1 + %19 = mul i8 %18, %16 + %20 = getelementptr inbounds i8, ptr %14, i32 1 + store i8 %19, ptr %20, align 1 + %21 = add nuw i32 %8, 1 + %22 = icmp eq i32 %21, %3 + br i1 %22, label %6, label %7 +} + +; CHECK-LABEL: two_bytes_vary_op: +; CHECK: loop +; CHECK: i32.load8_u +; CHECK: i32.load8_u +; CHECK: i32.mul +; CHECK: i32.store8 +; CHECK: i32.load8_u +; CHECK: i32.load8_u +; CHECK: i32.sub +; CHECK: i32.store8 +define hidden void @two_bytes_vary_op(ptr noalias nocapture noundef writeonly %0, ptr nocapture noundef readonly %1, ptr nocapture noundef readonly %2, i32 noundef %3) { + %5 = icmp eq i32 %3, 0 + br i1 %5, label %6, label %7 + +6: ; preds = %7, %4 + ret void + +7: ; preds = %4, %7 + %8 = phi i32 [ %21, %7 ], [ 0, %4 ] + %9 = getelementptr inbounds %struct.TwoBytes, ptr %1, i32 %8 + %10 = load i8, ptr %9, align 1 + %11 = getelementptr inbounds %struct.TwoBytes, ptr %2, i32 %8 + %12 = load i8, ptr %11, align 1 + %13 = mul i8 %12, %10 + %14 = getelementptr inbounds %struct.TwoBytes, ptr %0, i32 %8 + store i8 %13, ptr %14, align 1 + %15 = getelementptr inbounds i8, ptr %9, i32 1 + %16 = load i8, ptr %15, align 1 + %17 = getelementptr inbounds i8, ptr %11, i32 1 + %18 = load i8, ptr %17, align 1 + %19 = sub i8 %16, %18 + %20 = getelementptr inbounds i8, ptr %14, i32 1 + store i8 %19, ptr %20, align 1 + %21 = add nuw i32 %8, 1 + %22 = icmp eq i32 %21, %3 + br i1 %22, label %6, label %7 +} + +; CHECK-LABEL: three_bytes_same_op: +; CHECK: loop +; CHECK: i32.load8_u +; CHECK: i32.load8_u +; CHECK: i32.and +; CHECK: i32.store8 +; CHECK: i32.load8_u +; CHECK: i32.load8_u +; CHECK: i32.and +; CHECK: i32.store8 +; CHECK: i32.load8_u +; CHECK: i32.load8_u +; CHECK: i32.and +; CHECK: i32.store8 +define hidden void @three_bytes_same_op(ptr noalias nocapture noundef writeonly %0, ptr nocapture noundef readonly %1, ptr nocapture noundef readonly %2, i32 noundef %3) { + %5 = icmp eq i32 %3, 0 + br i1 %5, label %6, label %7 + +6: ; preds = %7, %4 + ret void + +7: ; preds = %4, %7 + %8 = phi i32 [ %27, %7 ], [ 0, %4 ] + %9 = getelementptr inbounds %struct.ThreeBytes, ptr %1, i32 %8 + %10 = 
load i8, ptr %9, align 1 + %11 = getelementptr inbounds %struct.ThreeBytes, ptr %2, i32 %8 + %12 = load i8, ptr %11, align 1 + %13 = and i8 %12, %10 + %14 = getelementptr inbounds %struct.ThreeBytes, ptr %0, i32 %8 + store i8 %13, ptr %14, align 1 + %15 = getelementptr inbounds i8, ptr %9, i32 1 + %16 = load i8, ptr %15, align 1 + %17 = getelementptr inbounds i8, ptr %11, i32 1 + %18 = load i8, ptr %17, align 1 + %19 = and i8 %18, %16 + %20 = getelementptr inbounds i8, ptr %14, i32 1 + store i8 %19, ptr %20, align 1 + %21 = getelementptr inbounds i8, ptr %9, i32 2 + %22 = load i8, ptr %21, align 1 + %23 = getelementptr inbounds i8, ptr %11, i32 2 + %24 = load i8, ptr %23, align 1 + %25 = and i8 %24, %22 + %26 = getelementptr inbounds i8, ptr %14, i32 2 + store i8 %25, ptr %26, align 1 + %27 = add nuw i32 %8, 1 + %28 = icmp eq i32 %27, %3 + br i1 %28, label %6, label %7 +} + +; CHECK-LABEL: three_bytes_interleave_op: +; CHECK: loop +; CHECK: i32.load8_u +; CHECK: i32.load8_u +; CHECK: i32.add +; CHECK: i32.store8 +; CHECK: i32.load8_u +; CHECK: i32.load8_u +; CHECK: i32.sub +; CHECK: i32.store8 +; CHECK: i32.load8_u +; CHECK: i32.load8_u +; CHECK: i32.add +; CHECK: i32.store8 +define hidden void @three_bytes_interleave_op(ptr noalias nocapture noundef writeonly %0, ptr nocapture noundef readonly %1, ptr nocapture noundef readonly %2, i32 noundef %3) { + %5 = icmp eq i32 %3, 0 + br i1 %5, label %6, label %7 + +6: ; preds = %7, %4 + ret void + +7: ; preds = %4, %7 + %8 = phi i32 [ %27, %7 ], [ 0, %4 ] + %9 = getelementptr inbounds %struct.ThreeBytes, ptr %1, i32 %8 + %10 = load i8, ptr %9, align 1 + %11 = getelementptr inbounds %struct.ThreeBytes, ptr %2, i32 %8 + %12 = load i8, ptr %11, align 1 + %13 = add i8 %12, %10 + %14 = getelementptr inbounds %struct.ThreeBytes, ptr %0, i32 %8 + store i8 %13, ptr %14, align 1 + %15 = getelementptr inbounds i8, ptr %9, i32 1 + %16 = load i8, ptr %15, align 1 + %17 = getelementptr inbounds i8, ptr %11, i32 1 + %18 = load i8, ptr %17, align 1 + %19 = sub i8 %16, %18 + %20 = getelementptr inbounds i8, ptr %14, i32 1 + store i8 %19, ptr %20, align 1 + %21 = getelementptr inbounds i8, ptr %9, i32 2 + %22 = load i8, ptr %21, align 1 + %23 = getelementptr inbounds i8, ptr %11, i32 2 + %24 = load i8, ptr %23, align 1 + %25 = add i8 %24, %22 + %26 = getelementptr inbounds i8, ptr %14, i32 2 + store i8 %25, ptr %26, align 1 + %27 = add nuw i32 %8, 1 + %28 = icmp eq i32 %27, %3 + br i1 %28, label %6, label %7 +} + +; CHECK-LABEL: four_bytes_same_op: +; CHECK: loop +; CHECK: i32.load8_u +; CHECK: i32.load8_u +; CHECK: i32.and +; CHECK: i32.store8 +; CHECK: i32.load8_u +; CHECK: i32.load8_u +; CHECK: i32.and +; CHECK: i32.store8 +; CHECK: i32.load8_u +; CHECK: i32.load8_u +; CHECK: i32.and +; CHECK: i32.store8 +; CHECK: i32.load8_u +; CHECK: i32.load8_u +; CHECK: i32.and +; CHECK: i32.store8 +define hidden void @four_bytes_same_op(ptr noalias nocapture noundef writeonly %0, ptr nocapture noundef readonly %1, ptr nocapture noundef readonly %2, i32 noundef %3) { + %5 = icmp eq i32 %3, 0 + br i1 %5, label %6, label %7 + +6: ; preds = %7, %4 + ret void + +7: ; preds = %4, %7 + %8 = phi i32 [ %33, %7 ], [ 0, %4 ] + %9 = getelementptr inbounds %struct.FourBytes, ptr %1, i32 %8 + %10 = load i8, ptr %9, align 1 + %11 = getelementptr inbounds %struct.FourBytes, ptr %2, i32 %8 + %12 = load i8, ptr %11, align 1 + %13 = and i8 %12, %10 + %14 = getelementptr inbounds %struct.FourBytes, ptr %0, i32 %8 + store i8 %13, ptr %14, align 1 + %15 = getelementptr inbounds i8, ptr %9, i32 
1 + %16 = load i8, ptr %15, align 1 + %17 = getelementptr inbounds i8, ptr %11, i32 1 + %18 = load i8, ptr %17, align 1 + %19 = and i8 %18, %16 + %20 = getelementptr inbounds i8, ptr %14, i32 1 + store i8 %19, ptr %20, align 1 + %21 = getelementptr inbounds i8, ptr %9, i32 2 + %22 = load i8, ptr %21, align 1 + %23 = getelementptr inbounds i8, ptr %11, i32 2 + %24 = load i8, ptr %23, align 1 + %25 = and i8 %24, %22 + %26 = getelementptr inbounds i8, ptr %14, i32 2 + store i8 %25, ptr %26, align 1 + %27 = getelementptr inbounds i8, ptr %9, i32 3 + %28 = load i8, ptr %27, align 1 + %29 = getelementptr inbounds i8, ptr %11, i32 3 + %30 = load i8, ptr %29, align 1 + %31 = and i8 %30, %28 + %32 = getelementptr inbounds i8, ptr %14, i32 3 + store i8 %31, ptr %32, align 1 + %33 = add nuw i32 %8, 1 + %34 = icmp eq i32 %33, %3 + br i1 %34, label %6, label %7 +} + +; CHECK-LABEL: four_bytes_split_op: +; CHECK: loop +; CHECK: i32.load8_u +; CHECK: i32.load8_u +; CHECK: i32.mul +; CHECK: i32.store8 +; CHECK: i32.load8_u +; CHECK: i32.load8_u +; CHECK: i32.mul +; CHECK: i32.store8 +; CHECK: i32.load8_u +; CHECK: i32.load8_u +; CHECK: i32.sub +; CHECK: i32.store8 +; CHECK: i32.load8_u +; CHECK: i32.load8_u +; CHECK: i32.sub +; CHECK: i32.store8 +define hidden void @four_bytes_split_op(ptr noalias nocapture noundef writeonly %0, ptr nocapture noundef readonly %1, ptr nocapture noundef readonly %2, i32 noundef %3) { + %5 = icmp eq i32 %3, 0 + br i1 %5, label %6, label %7 + +6: ; preds = %7, %4 + ret void + +7: ; preds = %4, %7 + %8 = phi i32 [ %33, %7 ], [ 0, %4 ] + %9 = getelementptr inbounds %struct.FourBytes, ptr %1, i32 %8 + %10 = load i8, ptr %9, align 1 + %11 = getelementptr inbounds %struct.FourBytes, ptr %2, i32 %8 + %12 = load i8, ptr %11, align 1 + %13 = mul i8 %12, %10 + %14 = getelementptr inbounds %struct.FourBytes, ptr %0, i32 %8 + store i8 %13, ptr %14, align 1 + %15 = getelementptr inbounds i8, ptr %9, i32 1 + %16 = load i8, ptr %15, align 1 + %17 = getelementptr inbounds i8, ptr %11, i32 1 + %18 = load i8, ptr %17, align 1 + %19 = mul i8 %18, %16 + %20 = getelementptr inbounds i8, ptr %14, i32 1 + store i8 %19, ptr %20, align 1 + %21 = getelementptr inbounds i8, ptr %9, i32 2 + %22 = load i8, ptr %21, align 1 + %23 = getelementptr inbounds i8, ptr %11, i32 2 + %24 = load i8, ptr %23, align 1 + %25 = sub i8 %22, %24 + %26 = getelementptr inbounds i8, ptr %14, i32 2 + store i8 %25, ptr %26, align 1 + %27 = getelementptr inbounds i8, ptr %9, i32 3 + %28 = load i8, ptr %27, align 1 + %29 = getelementptr inbounds i8, ptr %11, i32 3 + %30 = load i8, ptr %29, align 1 + %31 = sub i8 %28, %30 + %32 = getelementptr inbounds i8, ptr %14, i32 3 + store i8 %31, ptr %32, align 1 + %33 = add nuw i32 %8, 1 + %34 = icmp eq i32 %33, %3 + br i1 %34, label %6, label %7 +} + +; CHECK-LABEL: four_bytes_interleave_op: +; CHECK: loop +; CHECK: i32.load8_u +; CHECK: i32.load8_u +; CHECK: i32.add +; CHECK: i32.store8 +; CHECK: i32.load8_u +; CHECK: i32.load8_u +; CHECK: i32.sub +; CHECK: i32.store8 +; CHECK: i32.load8_u +; CHECK: i32.load8_u +; CHECK: i32.add +; CHECK: i32.store8 +; CHECK: i32.load8_u +; CHECK: i32.load8_u +; CHECK: i32.sub +; CHECK: i32.store8 +define hidden void @four_bytes_interleave_op(ptr noalias nocapture noundef writeonly %0, ptr nocapture noundef readonly %1, ptr nocapture noundef readonly %2, i32 noundef %3) { + %5 = icmp eq i32 %3, 0 + br i1 %5, label %6, label %7 + +6: ; preds = %7, %4 + ret void + +7: ; preds = %4, %7 + %8 = phi i32 [ %33, %7 ], [ 0, %4 ] + %9 = getelementptr inbounds 
%struct.FourBytes, ptr %1, i32 %8 + %10 = load i8, ptr %9, align 1 + %11 = getelementptr inbounds %struct.FourBytes, ptr %2, i32 %8 + %12 = load i8, ptr %11, align 1 + %13 = add i8 %12, %10 + %14 = getelementptr inbounds %struct.FourBytes, ptr %0, i32 %8 + store i8 %13, ptr %14, align 1 + %15 = getelementptr inbounds i8, ptr %9, i32 1 + %16 = load i8, ptr %15, align 1 + %17 = getelementptr inbounds i8, ptr %11, i32 1 + %18 = load i8, ptr %17, align 1 + %19 = sub i8 %16, %18 + %20 = getelementptr inbounds i8, ptr %14, i32 1 + store i8 %19, ptr %20, align 1 + %21 = getelementptr inbounds i8, ptr %9, i32 2 + %22 = load i8, ptr %21, align 1 + %23 = getelementptr inbounds i8, ptr %11, i32 2 + %24 = load i8, ptr %23, align 1 + %25 = add i8 %24, %22 + %26 = getelementptr inbounds i8, ptr %14, i32 2 + store i8 %25, ptr %26, align 1 + %27 = getelementptr inbounds i8, ptr %9, i32 3 + %28 = load i8, ptr %27, align 1 + %29 = getelementptr inbounds i8, ptr %11, i32 3 + %30 = load i8, ptr %29, align 1 + %31 = sub i8 %28, %30 + %32 = getelementptr inbounds i8, ptr %14, i32 3 + store i8 %31, ptr %32, align 1 + %33 = add nuw i32 %8, 1 + %34 = icmp eq i32 %33, %3 + br i1 %34, label %6, label %7 +} + +; CHECK-LABEL: eight_bytes_same_op: +; CHECK: loop +; CHECK: i32.load8_u +; CHECK: i32.load8_u +; CHECK: i32.mul +; CHECK: i32.store8 +; CHECK: i32.load8_u +; CHECK: i32.load8_u +; CHECK: i32.mul +; CHECK: i32.store8 +; CHECK: i32.load8_u +; CHECK: i32.load8_u +; CHECK: i32.mul +; CHECK: i32.store8 +; CHECK: i32.load8_u +; CHECK: i32.load8_u +; CHECK: i32.mul +; CHECK: i32.store8 +; CHECK: i32.load8_u +; CHECK: i32.load8_u +; CHECK: i32.mul +; CHECK: i32.store8 +; CHECK: i32.load8_u +; CHECK: i32.load8_u +; CHECK: i32.mul +; CHECK: i32.store8 +; CHECK: i32.load8_u +; CHECK: i32.load8_u +; CHECK: i32.mul +; CHECK: i32.store8 +; CHECK: i32.load8_u +; CHECK: i32.load8_u +; CHECK: i32.mul +; CHECK: i32.store8 +define hidden void @eight_bytes_same_op(ptr noalias nocapture noundef writeonly %0, ptr nocapture noundef readonly %1, ptr nocapture noundef readonly %2, i32 noundef %3) { + %5 = icmp eq i32 %3, 0 + br i1 %5, label %6, label %7 + +6: ; preds = %7, %4 + ret void + +7: ; preds = %4, %7 + %8 = phi i32 [ %57, %7 ], [ 0, %4 ] + %9 = getelementptr inbounds %struct.EightBytes, ptr %1, i32 %8 + %10 = load i8, ptr %9, align 1 + %11 = getelementptr inbounds %struct.EightBytes, ptr %2, i32 %8 + %12 = load i8, ptr %11, align 1 + %13 = mul i8 %12, %10 + %14 = getelementptr inbounds %struct.EightBytes, ptr %0, i32 %8 + store i8 %13, ptr %14, align 1 + %15 = getelementptr inbounds i8, ptr %9, i32 1 + %16 = load i8, ptr %15, align 1 + %17 = getelementptr inbounds i8, ptr %11, i32 1 + %18 = load i8, ptr %17, align 1 + %19 = mul i8 %18, %16 + %20 = getelementptr inbounds i8, ptr %14, i32 1 + store i8 %19, ptr %20, align 1 + %21 = getelementptr inbounds i8, ptr %9, i32 2 + %22 = load i8, ptr %21, align 1 + %23 = getelementptr inbounds i8, ptr %11, i32 2 + %24 = load i8, ptr %23, align 1 + %25 = mul i8 %24, %22 + %26 = getelementptr inbounds i8, ptr %14, i32 2 + store i8 %25, ptr %26, align 1 + %27 = getelementptr inbounds i8, ptr %9, i32 3 + %28 = load i8, ptr %27, align 1 + %29 = getelementptr inbounds i8, ptr %11, i32 3 + %30 = load i8, ptr %29, align 1 + %31 = mul i8 %30, %28 + %32 = getelementptr inbounds i8, ptr %14, i32 3 + store i8 %31, ptr %32, align 1 + %33 = getelementptr inbounds i8, ptr %9, i32 4 + %34 = load i8, ptr %33, align 1 + %35 = getelementptr inbounds i8, ptr %11, i32 4 + %36 = load i8, ptr %35, align 1 + 
%37 = mul i8 %36, %34 + %38 = getelementptr inbounds i8, ptr %14, i32 4 + store i8 %37, ptr %38, align 1 + %39 = getelementptr inbounds i8, ptr %9, i32 5 + %40 = load i8, ptr %39, align 1 + %41 = getelementptr inbounds i8, ptr %11, i32 5 + %42 = load i8, ptr %41, align 1 + %43 = mul i8 %42, %40 + %44 = getelementptr inbounds i8, ptr %14, i32 5 + store i8 %43, ptr %44, align 1 + %45 = getelementptr inbounds i8, ptr %9, i32 6 + %46 = load i8, ptr %45, align 1 + %47 = getelementptr inbounds i8, ptr %11, i32 6 + %48 = load i8, ptr %47, align 1 + %49 = mul i8 %48, %46 + %50 = getelementptr inbounds i8, ptr %14, i32 6 + store i8 %49, ptr %50, align 1 + %51 = getelementptr inbounds i8, ptr %9, i32 7 + %52 = load i8, ptr %51, align 1 + %53 = getelementptr inbounds i8, ptr %11, i32 7 + %54 = load i8, ptr %53, align 1 + %55 = mul i8 %54, %52 + %56 = getelementptr inbounds i8, ptr %14, i32 7 + store i8 %55, ptr %56, align 1 + %57 = add nuw i32 %8, 1 + %58 = icmp eq i32 %57, %3 + br i1 %58, label %6, label %7 +} + +; CHECK-LABEL: eight_bytes_split_op: +; CHECK: loop +; CHECK: i32.load8_u +; CHECK: i32.load8_u +; CHECK: i32.add +; CHECK: i32.store8 +; CHECK: i32.load8_u +; CHECK: i32.load8_u +; CHECK: i32.add +; CHECK: i32.store8 +; CHECK: i32.load8_u +; CHECK: i32.load8_u +; CHECK: i32.add +; CHECK: i32.store8 +; CHECK: i32.load8_u +; CHECK: i32.load8_u +; CHECK: i32.add +; CHECK: i32.store8 +; CHECK: i32.load8_u +; CHECK: i32.load8_u +; CHECK: i32.sub +; CHECK: i32.store8 +; CHECK: i32.load8_u +; CHECK: i32.load8_u +; CHECK: i32.sub +; CHECK: i32.store8 +; CHECK: i32.load8_u +; CHECK: i32.load8_u +; CHECK: i32.sub +; CHECK: i32.store8 +; CHECK: i32.load8_u +; CHECK: i32.load8_u +; CHECK: i32.sub +; CHECK: i32.store8 +define hidden void @eight_bytes_split_op(ptr noalias nocapture noundef writeonly %0, ptr nocapture noundef readonly %1, ptr nocapture noundef readonly %2, i32 noundef %3) { + %5 = icmp eq i32 %3, 0 + br i1 %5, label %6, label %7 + +6: ; preds = %7, %4 + ret void + +7: ; preds = %4, %7 + %8 = phi i32 [ %57, %7 ], [ 0, %4 ] + %9 = getelementptr inbounds %struct.EightBytes, ptr %1, i32 %8 + %10 = load i8, ptr %9, align 1 + %11 = getelementptr inbounds %struct.EightBytes, ptr %2, i32 %8 + %12 = load i8, ptr %11, align 1 + %13 = add i8 %12, %10 + %14 = getelementptr inbounds %struct.EightBytes, ptr %0, i32 %8 + store i8 %13, ptr %14, align 1 + %15 = getelementptr inbounds i8, ptr %9, i32 1 + %16 = load i8, ptr %15, align 1 + %17 = getelementptr inbounds i8, ptr %11, i32 1 + %18 = load i8, ptr %17, align 1 + %19 = add i8 %18, %16 + %20 = getelementptr inbounds i8, ptr %14, i32 1 + store i8 %19, ptr %20, align 1 + %21 = getelementptr inbounds i8, ptr %9, i32 2 + %22 = load i8, ptr %21, align 1 + %23 = getelementptr inbounds i8, ptr %11, i32 2 + %24 = load i8, ptr %23, align 1 + %25 = add i8 %24, %22 + %26 = getelementptr inbounds i8, ptr %14, i32 2 + store i8 %25, ptr %26, align 1 + %27 = getelementptr inbounds i8, ptr %9, i32 3 + %28 = load i8, ptr %27, align 1 + %29 = getelementptr inbounds i8, ptr %11, i32 3 + %30 = load i8, ptr %29, align 1 + %31 = add i8 %30, %28 + %32 = getelementptr inbounds i8, ptr %14, i32 3 + store i8 %31, ptr %32, align 1 + %33 = getelementptr inbounds i8, ptr %9, i32 4 + %34 = load i8, ptr %33, align 1 + %35 = getelementptr inbounds i8, ptr %11, i32 4 + %36 = load i8, ptr %35, align 1 + %37 = sub i8 %34, %36 + %38 = getelementptr inbounds i8, ptr %14, i32 4 + store i8 %37, ptr %38, align 1 + %39 = getelementptr inbounds i8, ptr %9, i32 5 + %40 = load i8, ptr %39, 
align 1 + %41 = getelementptr inbounds i8, ptr %11, i32 5 + %42 = load i8, ptr %41, align 1 + %43 = sub i8 %40, %42 + %44 = getelementptr inbounds i8, ptr %14, i32 5 + store i8 %43, ptr %44, align 1 + %45 = getelementptr inbounds i8, ptr %9, i32 6 + %46 = load i8, ptr %45, align 1 + %47 = getelementptr inbounds i8, ptr %11, i32 6 + %48 = load i8, ptr %47, align 1 + %49 = sub i8 %46, %48 + %50 = getelementptr inbounds i8, ptr %14, i32 6 + store i8 %49, ptr %50, align 1 + %51 = getelementptr inbounds i8, ptr %9, i32 7 + %52 = load i8, ptr %51, align 1 + %53 = getelementptr inbounds i8, ptr %11, i32 7 + %54 = load i8, ptr %53, align 1 + %55 = sub i8 %52, %54 + %56 = getelementptr inbounds i8, ptr %14, i32 7 + store i8 %55, ptr %56, align 1 + %57 = add nuw i32 %8, 1 + %58 = icmp eq i32 %57, %3 + br i1 %58, label %6, label %7 +} + +; CHECK-LABEL: eight_bytes_interleave_op: +; CHECK: loop +; CHECK: i32.load8_u +; CHECK: i32.load8_u +; CHECK: i32.add +; CHECK: i32.store8 +; CHECK: i32.load8_u +; CHECK: i32.load8_u +; CHECK: i32.sub +; CHECK: i32.store8 +; CHECK: i32.load8_u +; CHECK: i32.load8_u +; CHECK: i32.add +; CHECK: i32.store8 +; CHECK: i32.load8_u +; CHECK: i32.load8_u +; CHECK: i32.sub +; CHECK: i32.store8 +; CHECK: i32.load8_u +; CHECK: i32.load8_u +; CHECK: i32.add +; CHECK: i32.store8 +; CHECK: i32.load8_u +; CHECK: i32.load8_u +; CHECK: i32.sub +; CHECK: i32.store8 +; CHECK: i32.load8_u +; CHECK: i32.load8_u +; CHECK: i32.add +; CHECK: i32.store8 +; CHECK: i32.load8_u +; CHECK: i32.load8_u +; CHECK: i32.sub +; CHECK: i32.store8 +define hidden void @eight_bytes_interleave_op(ptr noalias nocapture noundef writeonly %0, ptr nocapture noundef readonly %1, ptr nocapture noundef readonly %2, i32 noundef %3) { + %5 = icmp eq i32 %3, 0 + br i1 %5, label %6, label %7 + +6: ; preds = %7, %4 + ret void + +7: ; preds = %4, %7 + %8 = phi i32 [ %57, %7 ], [ 0, %4 ] + %9 = getelementptr inbounds %struct.EightBytes, ptr %1, i32 %8 + %10 = load i8, ptr %9, align 1 + %11 = getelementptr inbounds %struct.EightBytes, ptr %2, i32 %8 + %12 = load i8, ptr %11, align 1 + %13 = add i8 %12, %10 + %14 = getelementptr inbounds %struct.EightBytes, ptr %0, i32 %8 + store i8 %13, ptr %14, align 1 + %15 = getelementptr inbounds i8, ptr %9, i32 1 + %16 = load i8, ptr %15, align 1 + %17 = getelementptr inbounds i8, ptr %11, i32 1 + %18 = load i8, ptr %17, align 1 + %19 = sub i8 %16, %18 + %20 = getelementptr inbounds i8, ptr %14, i32 1 + store i8 %19, ptr %20, align 1 + %21 = getelementptr inbounds i8, ptr %9, i32 2 + %22 = load i8, ptr %21, align 1 + %23 = getelementptr inbounds i8, ptr %11, i32 2 + %24 = load i8, ptr %23, align 1 + %25 = add i8 %24, %22 + %26 = getelementptr inbounds i8, ptr %14, i32 2 + store i8 %25, ptr %26, align 1 + %27 = getelementptr inbounds i8, ptr %9, i32 3 + %28 = load i8, ptr %27, align 1 + %29 = getelementptr inbounds i8, ptr %11, i32 3 + %30 = load i8, ptr %29, align 1 + %31 = sub i8 %28, %30 + %32 = getelementptr inbounds i8, ptr %14, i32 3 + store i8 %31, ptr %32, align 1 + %33 = getelementptr inbounds i8, ptr %9, i32 4 + %34 = load i8, ptr %33, align 1 + %35 = getelementptr inbounds i8, ptr %11, i32 4 + %36 = load i8, ptr %35, align 1 + %37 = add i8 %36, %34 + %38 = getelementptr inbounds i8, ptr %14, i32 4 + store i8 %37, ptr %38, align 1 + %39 = getelementptr inbounds i8, ptr %9, i32 5 + %40 = load i8, ptr %39, align 1 + %41 = getelementptr inbounds i8, ptr %11, i32 5 + %42 = load i8, ptr %41, align 1 + %43 = sub i8 %40, %42 + %44 = getelementptr inbounds i8, ptr %14, i32 5 + 
store i8 %43, ptr %44, align 1 + %45 = getelementptr inbounds i8, ptr %9, i32 6 + %46 = load i8, ptr %45, align 1 + %47 = getelementptr inbounds i8, ptr %11, i32 6 + %48 = load i8, ptr %47, align 1 + %49 = add i8 %48, %46 + %50 = getelementptr inbounds i8, ptr %14, i32 6 + store i8 %49, ptr %50, align 1 + %51 = getelementptr inbounds i8, ptr %9, i32 7 + %52 = load i8, ptr %51, align 1 + %53 = getelementptr inbounds i8, ptr %11, i32 7 + %54 = load i8, ptr %53, align 1 + %55 = sub i8 %52, %54 + %56 = getelementptr inbounds i8, ptr %14, i32 7 + store i8 %55, ptr %56, align 1 + %57 = add nuw i32 %8, 1 + %58 = icmp eq i32 %57, %3 + br i1 %58, label %6, label %7 +} + +; CHECK-LABEL: four_bytes_into_four_ints_same_op: +; CHECK: loop +; CHECK: i32.load8_u +; CHECK: i32.load8_u +; CHECK: i32.mul +; CHECK: i32.load +; CHECK: i32.add +; CHECK: i32.store +; CHECK: i32.load8_u +; CHECK: i32.load8_u +; CHECK: i32.mul +; CHECK: i32.load +; CHECK: i32.add +; CHECK: i32.store +; CHECK: i32.load8_u +; CHECK: i32.load8_u +; CHECK: i32.mul +; CHECK: i32.load +; CHECK: i32.add +; CHECK: i32.store +; CHECK: i32.load8_u +; CHECK: i32.load8_u +; CHECK: i32.mul +; CHECK: i32.load +; CHECK: i32.add +; CHECK: i32.store +define hidden void @four_bytes_into_four_ints_same_op(ptr noalias nocapture noundef %0, ptr nocapture noundef readonly %1, ptr nocapture noundef readonly %2, i32 noundef %3) { + %5 = icmp eq i32 %3, 0 + br i1 %5, label %6, label %7 + +6: ; preds = %7, %4 + ret void + +7: ; preds = %4, %7 + %8 = phi i32 [ %49, %7 ], [ 0, %4 ] + %9 = getelementptr inbounds %struct.FourBytes, ptr %1, i32 %8 + %10 = load i8, ptr %9, align 1 + %11 = zext i8 %10 to i32 + %12 = getelementptr inbounds %struct.FourBytes, ptr %2, i32 %8 + %13 = load i8, ptr %12, align 1 + %14 = zext i8 %13 to i32 + %15 = mul nuw nsw i32 %14, %11 + %16 = getelementptr inbounds %struct.FourInts, ptr %0, i32 %8 + %17 = load i32, ptr %16, align 4 + %18 = add nsw i32 %15, %17 + store i32 %18, ptr %16, align 4 + %19 = getelementptr inbounds i8, ptr %9, i32 1 + %20 = load i8, ptr %19, align 1 + %21 = zext i8 %20 to i32 + %22 = getelementptr inbounds i8, ptr %12, i32 1 + %23 = load i8, ptr %22, align 1 + %24 = zext i8 %23 to i32 + %25 = mul nuw nsw i32 %24, %21 + %26 = getelementptr inbounds i8, ptr %16, i32 4 + %27 = load i32, ptr %26, align 4 + %28 = add nsw i32 %25, %27 + store i32 %28, ptr %26, align 4 + %29 = getelementptr inbounds i8, ptr %9, i32 2 + %30 = load i8, ptr %29, align 1 + %31 = zext i8 %30 to i32 + %32 = getelementptr inbounds i8, ptr %12, i32 2 + %33 = load i8, ptr %32, align 1 + %34 = zext i8 %33 to i32 + %35 = mul nuw nsw i32 %34, %31 + %36 = getelementptr inbounds i8, ptr %16, i32 8 + %37 = load i32, ptr %36, align 4 + %38 = add nsw i32 %35, %37 + store i32 %38, ptr %36, align 4 + %39 = getelementptr inbounds i8, ptr %9, i32 3 + %40 = load i8, ptr %39, align 1 + %41 = zext i8 %40 to i32 + %42 = getelementptr inbounds i8, ptr %12, i32 3 + %43 = load i8, ptr %42, align 1 + %44 = zext i8 %43 to i32 + %45 = mul nuw nsw i32 %44, %41 + %46 = getelementptr inbounds i8, ptr %16, i32 12 + %47 = load i32, ptr %46, align 4 + %48 = add nsw i32 %45, %47 + store i32 %48, ptr %46, align 4 + %49 = add nuw i32 %8, 1 + %50 = icmp eq i32 %49, %3 + br i1 %50, label %6, label %7 +} + +; CHECK-LABEL: four_bytes_into_four_ints_vary_op: +; CHECK: loop +; CHECK: i32.load8_u +; CHECK: i32.load8_u +; CHECK: i32.add +; CHECK: i32.store +; CHECK: i32.load8_u +; CHECK: i32.load8_u +; CHECK: i32.sub +; CHECK: i32.store +; CHECK: i32.load8_u +; CHECK: 
i32.load8_u +; CHECK: i32.mul +; CHECK: i32.store +; CHECK: i32.load8_u +; CHECK: i32.load8_u +; CHECK: i32.and +; CHECK: i32.store +define hidden void @four_bytes_into_four_ints_vary_op(ptr noalias nocapture noundef writeonly %0, ptr nocapture noundef readonly %1, ptr nocapture noundef readonly %2, i32 noundef %3) { + %5 = icmp eq i32 %3, 0 + br i1 %5, label %6, label %7 + +6: ; preds = %7, %4 + ret void + +7: ; preds = %4, %7 + %8 = phi i32 [ %40, %7 ], [ 0, %4 ] + %9 = getelementptr inbounds %struct.FourBytes, ptr %1, i32 %8 + %10 = load i8, ptr %9, align 1 + %11 = zext i8 %10 to i32 + %12 = getelementptr inbounds %struct.FourBytes, ptr %2, i32 %8 + %13 = load i8, ptr %12, align 1 + %14 = zext i8 %13 to i32 + %15 = add nuw nsw i32 %14, %11 + %16 = getelementptr inbounds %struct.FourInts, ptr %0, i32 %8 + store i32 %15, ptr %16, align 4 + %17 = getelementptr inbounds i8, ptr %9, i32 1 + %18 = load i8, ptr %17, align 1 + %19 = zext i8 %18 to i32 + %20 = getelementptr inbounds i8, ptr %12, i32 1 + %21 = load i8, ptr %20, align 1 + %22 = zext i8 %21 to i32 + %23 = sub nsw i32 %19, %22 + %24 = getelementptr inbounds i8, ptr %16, i32 4 + store i32 %23, ptr %24, align 4 + %25 = getelementptr inbounds i8, ptr %9, i32 2 + %26 = load i8, ptr %25, align 1 + %27 = zext i8 %26 to i32 + %28 = getelementptr inbounds i8, ptr %12, i32 2 + %29 = load i8, ptr %28, align 1 + %30 = zext i8 %29 to i32 + %31 = mul nuw nsw i32 %30, %27 + %32 = getelementptr inbounds i8, ptr %16, i32 8 + store i32 %31, ptr %32, align 4 + %33 = getelementptr inbounds i8, ptr %9, i32 3 + %34 = load i8, ptr %33, align 1 + %35 = getelementptr inbounds i8, ptr %12, i32 3 + %36 = load i8, ptr %35, align 1 + %37 = and i8 %36, %34 + %38 = zext i8 %37 to i32 + %39 = getelementptr inbounds i8, ptr %16, i32 12 + store i32 %38, ptr %39, align 4 + %40 = add nuw i32 %8, 1 + %41 = icmp eq i32 %40, %3 + br i1 %41, label %6, label %7 +} + +; CHECK-LABEL: scale_uv_row_down2: +; CHECK: loop +; CHECK: i32.load8_u +; CHECK: i32.store8 +; CHECK: i32.load8_u +; CHECK: i32.store8 +define hidden void @scale_uv_row_down2(ptr nocapture noundef readonly %0, i32 noundef %1, ptr nocapture noundef writeonly %2, i32 noundef %3) { + %5 = icmp sgt i32 %3, 0 + br i1 %5, label %6, label %19 + +6: ; preds = %4, %6 + %7 = phi i32 [ %17, %6 ], [ 0, %4 ] + %8 = phi ptr [ %15, %6 ], [ %0, %4 ] + %9 = phi ptr [ %16, %6 ], [ %2, %4 ] + %10 = getelementptr inbounds i8, ptr %8, i32 2 + %11 = load i8, ptr %10, align 1 + store i8 %11, ptr %9, align 1 + %12 = getelementptr inbounds i8, ptr %8, i32 3 + %13 = load i8, ptr %12, align 1 + %14 = getelementptr inbounds i8, ptr %9, i32 1 + store i8 %13, ptr %14, align 1 + %15 = getelementptr inbounds i8, ptr %8, i32 4 + %16 = getelementptr inbounds i8, ptr %9, i32 2 + %17 = add nuw nsw i32 %7, 1 + %18 = icmp eq i32 %17, %3 + br i1 %18, label %19, label %6 + +19: ; preds = %6, %4 + ret void +} + +; CHECK-LABEL: scale_uv_row_down2_box: +; CHECK: loop +; CHECK: i32.load8_u +; CHECK: i32.load8_u +; CHECK: i32.load8_u +; CHECK: i32.load8_u +; CHECK: i32.shr_u +; CHECK: i32.store8 +; CHECK: i32.load8_u +; CHECK: i32.load8_u +; CHECK: i32.load8_u +; CHECK: i32.load8_u +; CHECK: i32.shr_u +; CHECK: i32.store8 +define hidden void @scale_uv_row_down2_box(ptr nocapture noundef readonly %0, i32 noundef %1, ptr nocapture noundef writeonly %2, i32 noundef %3) { + %5 = icmp sgt i32 %3, 0 + br i1 %5, label %6, label %54 + +6: ; preds = %4 + %7 = add nsw i32 %1, 2 + %8 = add nsw i32 %1, 1 + %9 = add nsw i32 %1, 3 + br label %10 + +10: ; preds = %6, 
%10 + %11 = phi i32 [ 0, %6 ], [ %52, %10 ] + %12 = phi ptr [ %0, %6 ], [ %50, %10 ] + %13 = phi ptr [ %2, %6 ], [ %51, %10 ] + %14 = load i8, ptr %12, align 1 + %15 = zext i8 %14 to i16 + %16 = getelementptr inbounds i8, ptr %12, i32 2 + %17 = load i8, ptr %16, align 1 + %18 = zext i8 %17 to i16 + %19 = getelementptr inbounds i8, ptr %12, i32 %1 + %20 = load i8, ptr %19, align 1 + %21 = zext i8 %20 to i16 + %22 = getelementptr inbounds i8, ptr %12, i32 %7 + %23 = load i8, ptr %22, align 1 + %24 = zext i8 %23 to i16 + %25 = add nuw nsw i16 %15, 2 + %26 = add nuw nsw i16 %25, %18 + %27 = add nuw nsw i16 %26, %21 + %28 = add nuw nsw i16 %27, %24 + %29 = lshr i16 %28, 2 + %30 = trunc nuw i16 %29 to i8 + store i8 %30, ptr %13, align 1 + %31 = getelementptr inbounds i8, ptr %12, i32 1 + %32 = load i8, ptr %31, align 1 + %33 = zext i8 %32 to i16 + %34 = getelementptr inbounds i8, ptr %12, i32 3 + %35 = load i8, ptr %34, align 1 + %36 = zext i8 %35 to i16 + %37 = getelementptr inbounds i8, ptr %12, i32 %8 + %38 = load i8, ptr %37, align 1 + %39 = zext i8 %38 to i16 + %40 = getelementptr inbounds i8, ptr %12, i32 %9 + %41 = load i8, ptr %40, align 1 + %42 = zext i8 %41 to i16 + %43 = add nuw nsw i16 %33, 2 + %44 = add nuw nsw i16 %43, %36 + %45 = add nuw nsw i16 %44, %39 + %46 = add nuw nsw i16 %45, %42 + %47 = lshr i16 %46, 2 + %48 = trunc nuw i16 %47 to i8 + %49 = getelementptr inbounds i8, ptr %13, i32 1 + store i8 %48, ptr %49, align 1 + %50 = getelementptr inbounds i8, ptr %12, i32 4 + %51 = getelementptr inbounds i8, ptr %13, i32 2 + %52 = add nuw nsw i32 %11, 1 + %53 = icmp eq i32 %52, %3 + br i1 %53, label %54, label %10 + +54: ; preds = %10, %4 + ret void +} + +; CHECK-LABEL: scale_uv_row_down2_linear: +; CHECK: loop +; CHECK: i32.load8_u +; CHECK: i32.load8_u +; CHECK: i32.shr_u +; CHECK: i32.store8 +; CHECK: i32.load8_u +; CHECK: i32.load8_u +; CHECK: i32.shr_u +; CHECK: i32.store8 +define hidden void @scale_uv_row_down2_linear(ptr nocapture noundef readonly %0, i32 noundef %1, ptr nocapture noundef writeonly %2, i32 noundef %3) { + %5 = icmp sgt i32 %3, 0 + br i1 %5, label %6, label %34 + +6: ; preds = %4, %6 + %7 = phi i32 [ %32, %6 ], [ 0, %4 ] + %8 = phi ptr [ %30, %6 ], [ %0, %4 ] + %9 = phi ptr [ %31, %6 ], [ %2, %4 ] + %10 = load i8, ptr %8, align 1 + %11 = zext i8 %10 to i16 + %12 = getelementptr inbounds i8, ptr %8, i32 2 + %13 = load i8, ptr %12, align 1 + %14 = zext i8 %13 to i16 + %15 = add nuw nsw i16 %11, 1 + %16 = add nuw nsw i16 %15, %14 + %17 = lshr i16 %16, 1 + %18 = trunc nuw i16 %17 to i8 + store i8 %18, ptr %9, align 1 + %19 = getelementptr inbounds i8, ptr %8, i32 1 + %20 = load i8, ptr %19, align 1 + %21 = zext i8 %20 to i16 + %22 = getelementptr inbounds i8, ptr %8, i32 3 + %23 = load i8, ptr %22, align 1 + %24 = zext i8 %23 to i16 + %25 = add nuw nsw i16 %21, 1 + %26 = add nuw nsw i16 %25, %24 + %27 = lshr i16 %26, 1 + %28 = trunc nuw i16 %27 to i8 + %29 = getelementptr inbounds i8, ptr %9, i32 1 + store i8 %28, ptr %29, align 1 + %30 = getelementptr inbounds i8, ptr %8, i32 4 + %31 = getelementptr inbounds i8, ptr %9, i32 2 + %32 = add nuw nsw i32 %7, 1 + %33 = icmp eq i32 %32, %3 + br i1 %33, label %34, label %6 + +34: ; preds = %6, %4 + ret void +} diff --git a/llvm/test/CodeGen/WinEH/wineh-noret-cleanup.ll b/llvm/test/CodeGen/WinEH/wineh-noret-cleanup.ll index 3b3a460..ab6672e 100644 --- a/llvm/test/CodeGen/WinEH/wineh-noret-cleanup.ll +++ b/llvm/test/CodeGen/WinEH/wineh-noret-cleanup.ll @@ -1,4 +1,4 @@ -; RUN: sed -e s/.Cxx:// %s | llc 
-mtriple=x86_64-pc-windows-msvc | FileCheck %s --check-prefixes=CXX,X64CXX +; RUN: sed -e s/.Cxx:// %s | llc -mtriple=x86_64-pc-windows-msvc | FileCheck %s --check-prefixes=CXX ; RUN: sed -e s/.Seh:// %s | llc -mtriple=x86_64-pc-windows-msvc | FileCheck %s --check-prefixes=SEH ; RUN: %if aarch64-registered-target %{ sed -e s/.Cxx:// %s | llc -mtriple=aarch64-pc-windows-msvc | FileCheck %s --check-prefix=CXX %} ; RUN: %if aarch64-registered-target %{ sed -e s/.Seh:// %s | llc -mtriple=aarch64-pc-windows-msvc | FileCheck %s --check-prefix=SEH %} @@ -49,18 +49,14 @@ catch.body.2: ; CXX-NEXT: .[[ENTRY:long|word]] .Lfunc_begin0@IMGREL ; CXX-NEXT: .[[ENTRY]] -1 ; CXX-NEXT: .[[ENTRY]] .Ltmp0@IMGREL -; X64CXX-SAME: +1 ; CXX-NEXT: .[[ENTRY]] 1 ; CXX-NEXT: .[[ENTRY]] .Ltmp1@IMGREL -; X64CXX-SAME: +1 ; CXX-NEXT: .[[ENTRY]] -1 ; CXX-NEXT: .[[ENTRY]] "?catch$3@?0?test@4HA"@IMGREL ; CXX-NEXT: .[[ENTRY]] 2 ; CXX-NEXT: .[[ENTRY]] .Ltmp2@IMGREL -; X64CXX-SAME: +1 ; CXX-NEXT: .[[ENTRY]] 3 ; CXX-NEXT: .[[ENTRY]] .Ltmp3@IMGREL -; X64CXX-SAME: +1 ; CXX-NEXT: .[[ENTRY]] 2 ; CXX-NEXT: .[[ENTRY]] "?catch$5@?0?test@4HA"@IMGREL ; CXX-NEXT: .[[ENTRY]] 4 @@ -70,19 +66,19 @@ catch.body.2: ; SEH: .LBB0_[[CATCH:[0-9]+]]: {{.*}} %catch.body ; SEH-LABEL: .Llsda_begin0: ; SEH-NEXT: .[[ENTRY:long|word]] .Ltmp0@IMGREL -; SEH-NEXT: .[[ENTRY]] .Ltmp1@IMGREL+1 +; SEH-NEXT: .[[ENTRY]] .Ltmp1@IMGREL ; SEH-NEXT: .[[ENTRY]] dummy_filter@IMGREL ; SEH-NEXT: .[[ENTRY]] .LBB0_[[CATCH]]@IMGREL ; SEH-NEXT: .[[ENTRY]] .Ltmp0@IMGREL -; SEH-NEXT: .[[ENTRY]] .Ltmp1@IMGREL+1 +; SEH-NEXT: .[[ENTRY]] .Ltmp1@IMGREL ; SEH-NEXT: .[[ENTRY]] dummy_filter@IMGREL ; SEH-NEXT: .[[ENTRY]] .LBB0_[[CATCH2]]@IMGREL ; SEH-NEXT: .[[ENTRY]] .Ltmp2@IMGREL -; SEH-NEXT: .[[ENTRY]] .Ltmp3@IMGREL+1 +; SEH-NEXT: .[[ENTRY]] .Ltmp3@IMGREL ; SEH-NEXT: .[[ENTRY]] "?dtor$[[DTOR:[0-9]+]]@?0?test@4HA"@IMGREL ; SEH-NEXT: .[[ENTRY]] 0 ; SEH-NEXT: .[[ENTRY]] .Ltmp2@IMGREL -; SEH-NEXT: .[[ENTRY]] .Ltmp3@IMGREL+1 +; SEH-NEXT: .[[ENTRY]] .Ltmp3@IMGREL ; SEH-NEXT: .[[ENTRY]] dummy_filter@IMGREL ; SEH-NEXT: .[[ENTRY]] .LBB0_[[CATCH2]]@IMGREL ; SEH-NEXT: .Llsda_end0: diff --git a/llvm/test/CodeGen/X86/abds-neg.ll b/llvm/test/CodeGen/X86/abds-neg.ll index 2911edf..d9064c6 100644 --- a/llvm/test/CodeGen/X86/abds-neg.ll +++ b/llvm/test/CodeGen/X86/abds-neg.ll @@ -1076,15 +1076,15 @@ define i64 @abd_subnsw_i64(i64 %a, i64 %b) nounwind { ; X86-NEXT: pushl %esi ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X86-NEXT: movl {{[0-9]+}}(%esp), %esi -; X86-NEXT: subl {{[0-9]+}}(%esp), %esi -; X86-NEXT: sbbl {{[0-9]+}}(%esp), %ecx -; X86-NEXT: movl %ecx, %edx +; X86-NEXT: subl {{[0-9]+}}(%esp), %ecx +; X86-NEXT: sbbl {{[0-9]+}}(%esp), %esi +; X86-NEXT: movl %esi, %edx ; X86-NEXT: sarl $31, %edx -; X86-NEXT: xorl %edx, %ecx ; X86-NEXT: xorl %edx, %esi +; X86-NEXT: xorl %edx, %ecx ; X86-NEXT: movl %edx, %eax -; X86-NEXT: subl %esi, %eax -; X86-NEXT: sbbl %ecx, %edx +; X86-NEXT: subl %ecx, %eax +; X86-NEXT: sbbl %esi, %edx ; X86-NEXT: popl %esi ; X86-NEXT: retl ; @@ -1107,15 +1107,15 @@ define i64 @abd_subnsw_i64_undef(i64 %a, i64 %b) nounwind { ; X86-NEXT: pushl %esi ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X86-NEXT: movl {{[0-9]+}}(%esp), %esi -; X86-NEXT: subl {{[0-9]+}}(%esp), %esi -; X86-NEXT: sbbl {{[0-9]+}}(%esp), %ecx -; X86-NEXT: movl %ecx, %edx +; X86-NEXT: subl {{[0-9]+}}(%esp), %ecx +; X86-NEXT: sbbl {{[0-9]+}}(%esp), %esi +; X86-NEXT: movl %esi, %edx ; X86-NEXT: sarl $31, %edx -; X86-NEXT: xorl %edx, %ecx ; X86-NEXT: xorl %edx, %esi +; X86-NEXT: xorl %edx, %ecx ; X86-NEXT: movl %edx, 
%eax -; X86-NEXT: subl %esi, %eax -; X86-NEXT: sbbl %ecx, %edx +; X86-NEXT: subl %ecx, %eax +; X86-NEXT: sbbl %esi, %edx ; X86-NEXT: popl %esi ; X86-NEXT: retl ; @@ -1142,32 +1142,32 @@ define i128 @abd_subnsw_i128(i128 %a, i128 %b) nounwind { ; X86-NEXT: pushl %esi ; X86-NEXT: andl $-16, %esp ; X86-NEXT: subl $16, %esp -; X86-NEXT: movl 36(%ebp), %eax ; X86-NEXT: movl 32(%ebp), %ecx +; X86-NEXT: movl 36(%ebp), %eax +; X86-NEXT: movl 24(%ebp), %edi ; X86-NEXT: movl 28(%ebp), %edx -; X86-NEXT: movl 24(%ebp), %esi -; X86-NEXT: subl 40(%ebp), %esi +; X86-NEXT: subl 40(%ebp), %edi ; X86-NEXT: sbbl 44(%ebp), %edx ; X86-NEXT: sbbl 48(%ebp), %ecx ; X86-NEXT: sbbl 52(%ebp), %eax -; X86-NEXT: movl %eax, %edi -; X86-NEXT: sarl $31, %edi -; X86-NEXT: xorl %edi, %eax -; X86-NEXT: xorl %edi, %ecx -; X86-NEXT: xorl %edi, %edx -; X86-NEXT: xorl %edi, %esi -; X86-NEXT: movl %edi, %ebx -; X86-NEXT: subl %esi, %ebx -; X86-NEXT: movl %edi, %esi -; X86-NEXT: sbbl %edx, %esi -; X86-NEXT: movl %edi, %edx +; X86-NEXT: movl %eax, %esi +; X86-NEXT: sarl $31, %esi +; X86-NEXT: xorl %esi, %eax +; X86-NEXT: xorl %esi, %ecx +; X86-NEXT: xorl %esi, %edx +; X86-NEXT: xorl %esi, %edi +; X86-NEXT: movl %esi, %ebx +; X86-NEXT: subl %edi, %ebx +; X86-NEXT: movl %esi, %edi +; X86-NEXT: sbbl %edx, %edi +; X86-NEXT: movl %esi, %edx ; X86-NEXT: sbbl %ecx, %edx -; X86-NEXT: sbbl %eax, %edi +; X86-NEXT: sbbl %eax, %esi ; X86-NEXT: movl 8(%ebp), %eax ; X86-NEXT: movl %ebx, (%eax) -; X86-NEXT: movl %esi, 4(%eax) +; X86-NEXT: movl %edi, 4(%eax) ; X86-NEXT: movl %edx, 8(%eax) -; X86-NEXT: movl %edi, 12(%eax) +; X86-NEXT: movl %esi, 12(%eax) ; X86-NEXT: leal -12(%ebp), %esp ; X86-NEXT: popl %esi ; X86-NEXT: popl %edi @@ -1203,32 +1203,32 @@ define i128 @abd_subnsw_i128_undef(i128 %a, i128 %b) nounwind { ; X86-NEXT: pushl %esi ; X86-NEXT: andl $-16, %esp ; X86-NEXT: subl $16, %esp -; X86-NEXT: movl 36(%ebp), %eax ; X86-NEXT: movl 32(%ebp), %ecx +; X86-NEXT: movl 36(%ebp), %eax +; X86-NEXT: movl 24(%ebp), %edi ; X86-NEXT: movl 28(%ebp), %edx -; X86-NEXT: movl 24(%ebp), %esi -; X86-NEXT: subl 40(%ebp), %esi +; X86-NEXT: subl 40(%ebp), %edi ; X86-NEXT: sbbl 44(%ebp), %edx ; X86-NEXT: sbbl 48(%ebp), %ecx ; X86-NEXT: sbbl 52(%ebp), %eax -; X86-NEXT: movl %eax, %edi -; X86-NEXT: sarl $31, %edi -; X86-NEXT: xorl %edi, %eax -; X86-NEXT: xorl %edi, %ecx -; X86-NEXT: xorl %edi, %edx -; X86-NEXT: xorl %edi, %esi -; X86-NEXT: movl %edi, %ebx -; X86-NEXT: subl %esi, %ebx -; X86-NEXT: movl %edi, %esi -; X86-NEXT: sbbl %edx, %esi -; X86-NEXT: movl %edi, %edx +; X86-NEXT: movl %eax, %esi +; X86-NEXT: sarl $31, %esi +; X86-NEXT: xorl %esi, %eax +; X86-NEXT: xorl %esi, %ecx +; X86-NEXT: xorl %esi, %edx +; X86-NEXT: xorl %esi, %edi +; X86-NEXT: movl %esi, %ebx +; X86-NEXT: subl %edi, %ebx +; X86-NEXT: movl %esi, %edi +; X86-NEXT: sbbl %edx, %edi +; X86-NEXT: movl %esi, %edx ; X86-NEXT: sbbl %ecx, %edx -; X86-NEXT: sbbl %eax, %edi +; X86-NEXT: sbbl %eax, %esi ; X86-NEXT: movl 8(%ebp), %eax ; X86-NEXT: movl %ebx, (%eax) -; X86-NEXT: movl %esi, 4(%eax) +; X86-NEXT: movl %edi, 4(%eax) ; X86-NEXT: movl %edx, 8(%eax) -; X86-NEXT: movl %edi, 12(%eax) +; X86-NEXT: movl %esi, 12(%eax) ; X86-NEXT: leal -12(%ebp), %esp ; X86-NEXT: popl %esi ; X86-NEXT: popl %edi diff --git a/llvm/test/CodeGen/X86/avg.ll b/llvm/test/CodeGen/X86/avg.ll index 217cceb..0de308a 100644 --- a/llvm/test/CodeGen/X86/avg.ll +++ b/llvm/test/CodeGen/X86/avg.ll @@ -1734,20 +1734,20 @@ define void @not_avg_v16i8_wide_constants(ptr %a, ptr %b) nounwind { ; SSE2-LABEL: 
not_avg_v16i8_wide_constants: ; SSE2: # %bb.0: ; SSE2-NEXT: movaps (%rdi), %xmm1 -; SSE2-NEXT: movdqa (%rsi), %xmm2 +; SSE2-NEXT: movdqa (%rsi), %xmm0 ; SSE2-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp) ; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax ; SSE2-NEXT: decl %eax -; SSE2-NEXT: movd %eax, %xmm0 +; SSE2-NEXT: movd %eax, %xmm2 ; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax ; SSE2-NEXT: decl %eax ; SSE2-NEXT: movd %eax, %xmm1 ; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax ; SSE2-NEXT: decl %eax -; SSE2-NEXT: movd %eax, %xmm4 +; SSE2-NEXT: movd %eax, %xmm3 ; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax ; SSE2-NEXT: decl %eax -; SSE2-NEXT: movd %eax, %xmm3 +; SSE2-NEXT: movd %eax, %xmm4 ; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax ; SSE2-NEXT: decl %eax ; SSE2-NEXT: movd %eax, %xmm5 @@ -1762,6 +1762,9 @@ define void @not_avg_v16i8_wide_constants(ptr %a, ptr %b) nounwind { ; SSE2-NEXT: movd %eax, %xmm8 ; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax ; SSE2-NEXT: decl %eax +; SSE2-NEXT: movd %eax, %xmm10 +; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax +; SSE2-NEXT: decl %eax ; SSE2-NEXT: movd %eax, %xmm9 ; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax ; SSE2-NEXT: decl %eax @@ -1771,9 +1774,6 @@ define void @not_avg_v16i8_wide_constants(ptr %a, ptr %b) nounwind { ; SSE2-NEXT: movd %eax, %xmm12 ; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax ; SSE2-NEXT: decl %eax -; SSE2-NEXT: movd %eax, %xmm10 -; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax -; SSE2-NEXT: decl %eax ; SSE2-NEXT: movd %eax, %xmm13 ; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax ; SSE2-NEXT: decl %eax @@ -1783,43 +1783,45 @@ define void @not_avg_v16i8_wide_constants(ptr %a, ptr %b) nounwind { ; SSE2-NEXT: movd %eax, %xmm15 ; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax ; SSE2-NEXT: decl %eax -; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3] -; SSE2-NEXT: movd %eax, %xmm0 -; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3] -; SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1] +; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3] +; SSE2-NEXT: movd %eax, %xmm2 +; SSE2-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3] +; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm4[0,0,0,0] +; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1] ; SSE2-NEXT: punpcklwd {{.*#+}} xmm6 = xmm6[0],xmm5[0],xmm6[1],xmm5[1],xmm6[2],xmm5[2],xmm6[3],xmm5[3] -; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm6[0,0,0,0] +; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm6[0,0,0,0] ; SSE2-NEXT: punpcklwd {{.*#+}} xmm8 = xmm8[0],xmm7[0],xmm8[1],xmm7[1],xmm8[2],xmm7[2],xmm8[3],xmm7[3] ; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm8[0,0,0,0] -; SSE2-NEXT: punpckhdq {{.*#+}} xmm4 = xmm4[2],xmm1[2],xmm4[3],xmm1[3] -; SSE2-NEXT: movsd {{.*#+}} xmm4 = xmm3[0],xmm4[1] +; SSE2-NEXT: punpckhdq {{.*#+}} xmm4 = xmm4[2],xmm3[2],xmm4[3],xmm3[3] +; SSE2-NEXT: movsd {{.*#+}} xmm4 = xmm1[0],xmm4[1] ; SSE2-NEXT: pxor %xmm3, %xmm3 -; SSE2-NEXT: movdqa %xmm2, %xmm1 +; SSE2-NEXT: movdqa %xmm0, %xmm1 ; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3],xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7] ; SSE2-NEXT: movapd %xmm4, %xmm5 ; SSE2-NEXT: andpd %xmm1, %xmm5 ; SSE2-NEXT: xorpd %xmm4, %xmm1 ; SSE2-NEXT: psrlw $1, %xmm1 ; SSE2-NEXT: paddw %xmm5, %xmm1 -; SSE2-NEXT: punpcklwd {{.*#+}} xmm11 = xmm11[0],xmm9[0],xmm11[1],xmm9[1],xmm11[2],xmm9[2],xmm11[3],xmm9[3] 
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm10 = xmm10[0],xmm12[0],xmm10[1],xmm12[1],xmm10[2],xmm12[2],xmm10[3],xmm12[3] -; SSE2-NEXT: punpckldq {{.*#+}} xmm10 = xmm10[0],xmm11[0],xmm10[1],xmm11[1] +; SSE2-NEXT: punpcklwd {{.*#+}} xmm9 = xmm9[0],xmm10[0],xmm9[1],xmm10[1],xmm9[2],xmm10[2],xmm9[3],xmm10[3] +; SSE2-NEXT: punpcklwd {{.*#+}} xmm12 = xmm12[0],xmm11[0],xmm12[1],xmm11[1],xmm12[2],xmm11[2],xmm12[3],xmm11[3] +; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm12[0,0,0,0] +; SSE2-NEXT: punpckldq {{.*#+}} xmm9 = xmm9[0],xmm4[0],xmm9[1],xmm4[1] ; SSE2-NEXT: punpcklwd {{.*#+}} xmm14 = xmm14[0],xmm13[0],xmm14[1],xmm13[1],xmm14[2],xmm13[2],xmm14[3],xmm13[3] ; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm14[0,0,0,0] -; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm15[0],xmm0[1],xmm15[1],xmm0[2],xmm15[2],xmm0[3],xmm15[3] -; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0] -; SSE2-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm4[2],xmm0[3],xmm4[3] -; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm10[0],xmm0[1] -; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm3[8],xmm2[9],xmm3[9],xmm2[10],xmm3[10],xmm2[11],xmm3[11],xmm2[12],xmm3[12],xmm2[13],xmm3[13],xmm2[14],xmm3[14],xmm2[15],xmm3[15] -; SSE2-NEXT: movapd %xmm0, %xmm3 -; SSE2-NEXT: andpd %xmm2, %xmm3 -; SSE2-NEXT: xorpd %xmm0, %xmm2 -; SSE2-NEXT: psrlw $1, %xmm2 -; SSE2-NEXT: paddw %xmm3, %xmm2 -; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0] -; SSE2-NEXT: pand %xmm0, %xmm2 -; SSE2-NEXT: pand %xmm0, %xmm1 -; SSE2-NEXT: packuswb %xmm2, %xmm1 +; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm15[0],xmm2[1],xmm15[1],xmm2[2],xmm15[2],xmm2[3],xmm15[3] +; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,0,0,0] +; SSE2-NEXT: punpckhdq {{.*#+}} xmm2 = xmm2[2],xmm4[2],xmm2[3],xmm4[3] +; SSE2-NEXT: movsd {{.*#+}} xmm2 = xmm9[0],xmm2[1] +; SSE2-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm3[8],xmm0[9],xmm3[9],xmm0[10],xmm3[10],xmm0[11],xmm3[11],xmm0[12],xmm3[12],xmm0[13],xmm3[13],xmm0[14],xmm3[14],xmm0[15],xmm3[15] +; SSE2-NEXT: movapd %xmm2, %xmm3 +; SSE2-NEXT: andpd %xmm0, %xmm3 +; SSE2-NEXT: xorpd %xmm2, %xmm0 +; SSE2-NEXT: psrlw $1, %xmm0 +; SSE2-NEXT: paddw %xmm3, %xmm0 +; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0] +; SSE2-NEXT: pand %xmm2, %xmm0 +; SSE2-NEXT: pand %xmm2, %xmm1 +; SSE2-NEXT: packuswb %xmm0, %xmm1 ; SSE2-NEXT: movdqu %xmm1, (%rax) ; SSE2-NEXT: retq ; @@ -1829,74 +1831,75 @@ define void @not_avg_v16i8_wide_constants(ptr %a, ptr %b) nounwind { ; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm3 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero ; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero ; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero -; AVX1-NEXT: vpxor %xmm4, %xmm4, %xmm4 -; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm5 = xmm3[4],xmm4[4],xmm3[5],xmm4[5],xmm3[6],xmm4[6],xmm3[7],xmm4[7] -; AVX1-NEXT: vpextrd $2, %xmm5, %ecx -; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm4 = xmm2[4],xmm4[4],xmm2[5],xmm4[5],xmm2[6],xmm4[6],xmm2[7],xmm4[7] -; AVX1-NEXT: vpextrd $2, %xmm4, %eax -; AVX1-NEXT: vpextrw $3, %xmm3, %edx +; AVX1-NEXT: vpextrw $7, %xmm3, %edx +; AVX1-NEXT: vpextrw $6, %xmm3, %ecx +; AVX1-NEXT: vpextrw $5, %xmm3, %eax ; AVX1-NEXT: decl %edx ; AVX1-NEXT: vmovd %edx, %xmm4 -; AVX1-NEXT: vpextrw $2, %xmm3, %edx -; AVX1-NEXT: decl %edx -; AVX1-NEXT: vmovd %edx, %xmm5 -; AVX1-NEXT: vpextrw $1, %xmm3, %edx -; AVX1-NEXT: 
decl %edx -; AVX1-NEXT: vmovd %edx, %xmm6 -; AVX1-NEXT: vpextrw $0, %xmm3, %edx +; AVX1-NEXT: vpextrw $4, %xmm3, %edx +; AVX1-NEXT: decl %ecx +; AVX1-NEXT: vmovd %ecx, %xmm5 +; AVX1-NEXT: vpextrw $1, %xmm3, %ecx +; AVX1-NEXT: decl %eax +; AVX1-NEXT: vmovd %eax, %xmm6 +; AVX1-NEXT: vpextrw $0, %xmm3, %eax ; AVX1-NEXT: decl %edx ; AVX1-NEXT: vmovd %edx, %xmm7 -; AVX1-NEXT: vpextrw $3, %xmm2, %edx -; AVX1-NEXT: decl %edx -; AVX1-NEXT: vmovd %edx, %xmm8 -; AVX1-NEXT: vpextrw $2, %xmm2, %edx +; AVX1-NEXT: vpextrw $3, %xmm3, %edx +; AVX1-NEXT: decq %rcx +; AVX1-NEXT: vmovq %rcx, %xmm8 +; AVX1-NEXT: vpextrw $2, %xmm3, %ecx +; AVX1-NEXT: decq %rax +; AVX1-NEXT: vmovq %rax, %xmm3 +; AVX1-NEXT: vpextrw $7, %xmm2, %eax ; AVX1-NEXT: decl %edx ; AVX1-NEXT: vmovd %edx, %xmm9 -; AVX1-NEXT: vpextrw $1, %xmm2, %edx -; AVX1-NEXT: decl %edx -; AVX1-NEXT: vmovd %edx, %xmm10 -; AVX1-NEXT: vpextrw $0, %xmm2, %edx -; AVX1-NEXT: decl %edx -; AVX1-NEXT: vmovd %edx, %xmm11 -; AVX1-NEXT: vpextrw $5, %xmm3, %edx +; AVX1-NEXT: vpextrw $6, %xmm2, %edx +; AVX1-NEXT: decl %ecx +; AVX1-NEXT: vmovd %ecx, %xmm10 +; AVX1-NEXT: vpextrw $5, %xmm2, %ecx +; AVX1-NEXT: decl %eax +; AVX1-NEXT: vmovd %eax, %xmm11 +; AVX1-NEXT: vpextrw $4, %xmm2, %eax ; AVX1-NEXT: decl %edx ; AVX1-NEXT: vmovd %edx, %xmm12 -; AVX1-NEXT: vpextrw $4, %xmm3, %edx -; AVX1-NEXT: decl %edx -; AVX1-NEXT: vmovd %edx, %xmm13 -; AVX1-NEXT: vpextrw $5, %xmm2, %edx -; AVX1-NEXT: decl %edx -; AVX1-NEXT: vmovd %edx, %xmm14 -; AVX1-NEXT: vpextrw $4, %xmm2, %edx -; AVX1-NEXT: decl %edx -; AVX1-NEXT: vmovd %edx, %xmm15 -; AVX1-NEXT: vpextrw $7, %xmm3, %edx +; AVX1-NEXT: vpextrw $1, %xmm2, %edx ; AVX1-NEXT: decl %ecx -; AVX1-NEXT: vmovd %ecx, %xmm3 -; AVX1-NEXT: vpextrw $7, %xmm2, %ecx -; AVX1-NEXT: decl %edx -; AVX1-NEXT: vmovd %edx, %xmm2 +; AVX1-NEXT: vmovd %ecx, %xmm13 +; AVX1-NEXT: vpextrw $0, %xmm2, %ecx +; AVX1-NEXT: decl %eax +; AVX1-NEXT: vmovd %eax, %xmm14 +; AVX1-NEXT: vpextrw $3, %xmm2, %eax +; AVX1-NEXT: decq %rdx +; AVX1-NEXT: vmovq %rdx, %xmm15 +; AVX1-NEXT: vpextrw $2, %xmm2, %edx +; AVX1-NEXT: decq %rcx +; AVX1-NEXT: vmovq %rcx, %xmm2 ; AVX1-NEXT: decl %eax ; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm4 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3] ; AVX1-NEXT: vmovd %eax, %xmm5 -; AVX1-NEXT: decl %ecx +; AVX1-NEXT: decl %edx ; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm6 = xmm7[0],xmm6[0],xmm7[1],xmm6[1],xmm7[2],xmm6[2],xmm7[3],xmm6[3] -; AVX1-NEXT: vmovd %ecx, %xmm7 -; AVX1-NEXT: vpunpckldq {{.*#+}} xmm4 = xmm6[0],xmm4[0],xmm6[1],xmm4[1] -; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm6 = xmm9[0],xmm8[0],xmm9[1],xmm8[1],xmm9[2],xmm8[2],xmm9[3],xmm8[3] -; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm8 = xmm11[0],xmm10[0],xmm11[1],xmm10[1],xmm11[2],xmm10[2],xmm11[3],xmm10[3] -; AVX1-NEXT: vpunpckldq {{.*#+}} xmm6 = xmm8[0],xmm6[0],xmm8[1],xmm6[1] -; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm6, %ymm4 -; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm6 = xmm13[0],xmm12[0],xmm13[1],xmm12[1],xmm13[2],xmm12[2],xmm13[3],xmm12[3] -; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm8 = xmm15[0],xmm14[0],xmm15[1],xmm14[1],xmm15[2],xmm14[2],xmm15[3],xmm14[3] -; AVX1-NEXT: vinsertf128 $1, %xmm6, %ymm8, %ymm6 -; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3] -; AVX1-NEXT: vmovddup {{.*#+}} ymm3 = ymm6[0,0,2,2] -; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm5 = xmm5[0],xmm7[0],xmm5[1],xmm7[1],xmm5[2],xmm7[2],xmm5[3],xmm7[3] -; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm5, %ymm2 -; AVX1-NEXT: vshufps {{.*#+}} ymm2 = ymm2[0,0,0,0,4,4,4,4] -; AVX1-NEXT: vblendps {{.*#+}} ymm2 = 
ymm3[0,1,2],ymm2[3],ymm3[4,5,6],ymm2[7] -; AVX1-NEXT: vblendps {{.*#+}} ymm2 = ymm4[0,1],ymm2[2,3],ymm4[4,5],ymm2[6,7] +; AVX1-NEXT: vmovd %edx, %xmm7 +; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm4[0,0,0,0] +; AVX1-NEXT: vpshufd {{.*#+}} xmm6 = xmm6[0,1,0,1] +; AVX1-NEXT: vpblendw {{.*#+}} xmm4 = xmm6[0,1,2,3,4,5],xmm4[6,7] +; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm3 = xmm3[0],xmm8[0],xmm3[1],xmm8[1],xmm3[2],xmm8[2],xmm3[3],xmm8[3] +; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm6 = xmm10[0],xmm9[0],xmm10[1],xmm9[1],xmm10[2],xmm9[2],xmm10[3],xmm9[3] +; AVX1-NEXT: vpshufd {{.*#+}} xmm6 = xmm6[0,0,1,1] +; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1],xmm6[2,3],xmm3[4,5,6,7] +; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm4[4,5,6,7] +; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm4 = xmm12[0],xmm11[0],xmm12[1],xmm11[1],xmm12[2],xmm11[2],xmm12[3],xmm11[3] +; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm6 = xmm14[0],xmm13[0],xmm14[1],xmm13[1],xmm14[2],xmm13[2],xmm14[3],xmm13[3] +; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm4[0,0,0,0] +; AVX1-NEXT: vpshufd {{.*#+}} xmm6 = xmm6[0,1,0,1] +; AVX1-NEXT: vpblendw {{.*#+}} xmm4 = xmm6[0,1,2,3,4,5],xmm4[6,7] +; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm2[0],xmm15[0],xmm2[1],xmm15[1],xmm2[2],xmm15[2],xmm2[3],xmm15[3] +; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm5 = xmm7[0],xmm5[0],xmm7[1],xmm5[1],xmm7[2],xmm5[2],xmm7[3],xmm5[3] +; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm5[0,0,1,1] +; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm5[2,3],xmm2[4,5,6,7] +; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm4[4,5,6,7] +; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2 ; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0 ; AVX1-NEXT: vandps %ymm0, %ymm2, %ymm1 ; AVX1-NEXT: vxorps %ymm0, %ymm2, %ymm0 diff --git a/llvm/test/CodeGen/X86/catchret-empty-fallthrough.ll b/llvm/test/CodeGen/X86/catchret-empty-fallthrough.ll index ab9fa22..24d3030 100644 --- a/llvm/test/CodeGen/X86/catchret-empty-fallthrough.ll +++ b/llvm/test/CodeGen/X86/catchret-empty-fallthrough.ll @@ -48,6 +48,6 @@ return: ; preds = %catch, %entry ; CHECK-NEXT: .long (.Llsda_end0-.Llsda_begin0)/16 ; CHECK-NEXT: .Llsda_begin0: ; CHECK-NEXT: .long .Ltmp0@IMGREL -; CHECK-NEXT: .long .Ltmp1@IMGREL+1 +; CHECK-NEXT: .long .Ltmp1@IMGREL ; CHECK-NEXT: .long 1 ; CHECK-NEXT: .long .LBB0_[[catch]]@IMGREL diff --git a/llvm/test/CodeGen/X86/conditional-tailcall-pgso.ll b/llvm/test/CodeGen/X86/conditional-tailcall-pgso.ll index c4c194e..7855ff2 100644 --- a/llvm/test/CodeGen/X86/conditional-tailcall-pgso.ll +++ b/llvm/test/CodeGen/X86/conditional-tailcall-pgso.ll @@ -121,7 +121,6 @@ define void @f_non_leaf(i32 %x, i32 %y) !prof !14 { ; WIN64-NEXT: # encoding: [0xeb,A] ; WIN64-NEXT: # fixup A - offset: 1, value: foo, kind: FK_PCRel_1 ; WIN64-NEXT: .LBB1_2: # %bb2 -; WIN64-NEXT: nop # encoding: [0x90] ; WIN64-NEXT: .seh_startepilogue ; WIN64-NEXT: popq %rbx # encoding: [0x5b] ; WIN64-NEXT: .seh_endepilogue diff --git a/llvm/test/CodeGen/X86/conditional-tailcall.ll b/llvm/test/CodeGen/X86/conditional-tailcall.ll index 9c1d830..2859a87 100644 --- a/llvm/test/CodeGen/X86/conditional-tailcall.ll +++ b/llvm/test/CodeGen/X86/conditional-tailcall.ll @@ -121,7 +121,6 @@ define void @f_non_leaf(i32 %x, i32 %y) optsize { ; WIN64-NEXT: # encoding: [0xeb,A] ; WIN64-NEXT: # fixup A - offset: 1, value: foo, kind: FK_PCRel_1 ; WIN64-NEXT: .LBB1_2: # %bb2 -; WIN64-NEXT: nop # encoding: [0x90] ; WIN64-NEXT: .seh_startepilogue ; WIN64-NEXT: popq %rbx # encoding: [0x5b] ; WIN64-NEXT: .seh_endepilogue diff --git a/llvm/test/CodeGen/X86/div-rem-pair-recomposition-signed.ll 
b/llvm/test/CodeGen/X86/div-rem-pair-recomposition-signed.ll index 661e7bb..455b72d 100644 --- a/llvm/test/CodeGen/X86/div-rem-pair-recomposition-signed.ll +++ b/llvm/test/CodeGen/X86/div-rem-pair-recomposition-signed.ll @@ -172,10 +172,9 @@ define i128 @scalar_i128(i128 %x, i128 %y, ptr %divdst) nounwind { ; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill ; X86-NEXT: sbbl %eax, %edi ; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 52(%ebp), %ecx -; X86-NEXT: movl %ecx, %edx +; X86-NEXT: movl 52(%ebp), %esi +; X86-NEXT: movl %esi, %edx ; X86-NEXT: sarl $31, %edx -; X86-NEXT: movl %ecx, %esi ; X86-NEXT: xorl %edx, %esi ; X86-NEXT: movl 48(%ebp), %ecx ; X86-NEXT: xorl %edx, %ecx @@ -204,45 +203,45 @@ define i128 @scalar_i128(i128 %x, i128 %y, ptr %divdst) nounwind { ; X86-NEXT: sete %al ; X86-NEXT: orb %cl, %al ; X86-NEXT: movb %al, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload -; X86-NEXT: bsrl %eax, %edx +; X86-NEXT: bsrl %esi, %edx ; X86-NEXT: xorl $31, %edx -; X86-NEXT: addl $32, %edx -; X86-NEXT: bsrl %esi, %ecx +; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload +; X86-NEXT: bsrl %eax, %ecx ; X86-NEXT: xorl $31, %ecx +; X86-NEXT: orl $32, %ecx ; X86-NEXT: testl %esi, %esi -; X86-NEXT: cmovel %edx, %ecx +; X86-NEXT: cmovnel %edx, %ecx ; X86-NEXT: bsrl %ebx, %edx ; X86-NEXT: xorl $31, %edx ; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill ; X86-NEXT: bsrl %edi, %edi ; X86-NEXT: xorl $31, %edi -; X86-NEXT: addl $32, %edi +; X86-NEXT: orl $32, %edi ; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill ; X86-NEXT: testl %ebx, %ebx ; X86-NEXT: cmovnel %edx, %edi -; X86-NEXT: addl $64, %edi +; X86-NEXT: orl $64, %edi ; X86-NEXT: movl %eax, %edx ; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill ; X86-NEXT: orl %esi, %edx ; X86-NEXT: cmovnel %ecx, %edi -; X86-NEXT: bsrl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload -; X86-NEXT: xorl $31, %edx -; X86-NEXT: addl $32, %edx ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload -; X86-NEXT: bsrl %eax, %ecx +; X86-NEXT: bsrl %eax, %edx +; X86-NEXT: xorl $31, %edx +; X86-NEXT: bsrl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload ; X86-NEXT: xorl $31, %ecx +; X86-NEXT: orl $32, %ecx ; X86-NEXT: testl %eax, %eax -; X86-NEXT: cmovel %edx, %ecx +; X86-NEXT: cmovnel %edx, %ecx ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload ; X86-NEXT: bsrl %ebx, %esi ; X86-NEXT: xorl $31, %esi ; X86-NEXT: bsrl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload ; X86-NEXT: xorl $31, %edx -; X86-NEXT: addl $32, %edx +; X86-NEXT: orl $32, %edx ; X86-NEXT: testl %ebx, %ebx ; X86-NEXT: cmovnel %esi, %edx -; X86-NEXT: addl $64, %edx +; X86-NEXT: orl $64, %edx ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload ; X86-NEXT: orl %eax, %esi ; X86-NEXT: cmovnel %ecx, %edx @@ -380,9 +379,9 @@ define i128 @scalar_i128(i128 %x, i128 %y, ptr %divdst) nounwind { ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload ; X86-NEXT: adcl $-1, %eax ; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload -; X86-NEXT: adcl $-1, %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill +; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload +; X86-NEXT: adcl $-1, %ecx +; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload 
; X86-NEXT: adcl $-1, %ecx ; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill diff --git a/llvm/test/CodeGen/X86/div-rem-pair-recomposition-unsigned.ll b/llvm/test/CodeGen/X86/div-rem-pair-recomposition-unsigned.ll index 370e1c6..859e924 100644 --- a/llvm/test/CodeGen/X86/div-rem-pair-recomposition-unsigned.ll +++ b/llvm/test/CodeGen/X86/div-rem-pair-recomposition-unsigned.ll @@ -173,17 +173,17 @@ define i128 @scalar_i128(i128 %x, i128 %y, ptr %divdst) nounwind { ; X86-NEXT: xorl $31, %edx ; X86-NEXT: bsrl 48(%ebp), %ecx ; X86-NEXT: xorl $31, %ecx -; X86-NEXT: addl $32, %ecx +; X86-NEXT: orl $32, %ecx ; X86-NEXT: testl %esi, %esi ; X86-NEXT: cmovnel %edx, %ecx ; X86-NEXT: bsrl %edi, %edx ; X86-NEXT: xorl $31, %edx ; X86-NEXT: bsrl %ebx, %eax ; X86-NEXT: xorl $31, %eax -; X86-NEXT: addl $32, %eax +; X86-NEXT: orl $32, %eax ; X86-NEXT: testl %edi, %edi ; X86-NEXT: cmovnel %edx, %eax -; X86-NEXT: addl $64, %eax +; X86-NEXT: orl $64, %eax ; X86-NEXT: movl 48(%ebp), %edx ; X86-NEXT: orl %esi, %edx ; X86-NEXT: cmovnel %ecx, %eax @@ -193,7 +193,7 @@ define i128 @scalar_i128(i128 %x, i128 %y, ptr %divdst) nounwind { ; X86-NEXT: movl 32(%ebp), %ecx ; X86-NEXT: bsrl %ecx, %ecx ; X86-NEXT: xorl $31, %ecx -; X86-NEXT: addl $32, %ecx +; X86-NEXT: orl $32, %ecx ; X86-NEXT: testl %ebx, %ebx ; X86-NEXT: cmovnel %edx, %ecx ; X86-NEXT: movl 28(%ebp), %edi @@ -201,10 +201,10 @@ define i128 @scalar_i128(i128 %x, i128 %y, ptr %divdst) nounwind { ; X86-NEXT: xorl $31, %esi ; X86-NEXT: bsrl 24(%ebp), %edx ; X86-NEXT: xorl $31, %edx -; X86-NEXT: addl $32, %edx +; X86-NEXT: orl $32, %edx ; X86-NEXT: testl %edi, %edi ; X86-NEXT: cmovnel %esi, %edx -; X86-NEXT: addl $64, %edx +; X86-NEXT: orl $64, %edx ; X86-NEXT: movl 32(%ebp), %esi ; X86-NEXT: orl %ebx, %esi ; X86-NEXT: cmovnel %ecx, %edx diff --git a/llvm/test/CodeGen/X86/freeze-vector.ll b/llvm/test/CodeGen/X86/freeze-vector.ll index 0f66d42..953a5e7 100644 --- a/llvm/test/CodeGen/X86/freeze-vector.ll +++ b/llvm/test/CodeGen/X86/freeze-vector.ll @@ -171,15 +171,15 @@ define void @freeze_extractelement(ptr %origin0, ptr %origin1, ptr %dst) nounwin ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X86-NEXT: movl {{[0-9]+}}(%esp), %edx -; X86-NEXT: vmovdqa (%ecx), %xmm0 -; X86-NEXT: vpand (%edx), %xmm0, %xmm0 +; X86-NEXT: vmovdqa (%edx), %xmm0 +; X86-NEXT: vpand (%ecx), %xmm0, %xmm0 ; X86-NEXT: vpextrb $6, %xmm0, (%eax) ; X86-NEXT: retl ; ; X64-LABEL: freeze_extractelement: ; X64: # %bb.0: -; X64-NEXT: vmovdqa (%rsi), %xmm0 -; X64-NEXT: vpand (%rdi), %xmm0, %xmm0 +; X64-NEXT: vmovdqa (%rdi), %xmm0 +; X64-NEXT: vpand (%rsi), %xmm0, %xmm0 ; X64-NEXT: vpextrb $6, %xmm0, (%rdx) ; X64-NEXT: retq %i0 = load <16 x i8>, ptr %origin0 @@ -198,8 +198,8 @@ define void @freeze_extractelement_escape(ptr %origin0, ptr %origin1, ptr %dst, ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X86-NEXT: movl {{[0-9]+}}(%esp), %edx ; X86-NEXT: movl {{[0-9]+}}(%esp), %esi -; X86-NEXT: vmovdqa (%edx), %xmm0 -; X86-NEXT: vpand (%esi), %xmm0, %xmm0 +; X86-NEXT: vmovdqa (%esi), %xmm0 +; X86-NEXT: vpand (%edx), %xmm0, %xmm0 ; X86-NEXT: vmovdqa %xmm0, (%ecx) ; X86-NEXT: vpextrb $6, %xmm0, (%eax) ; X86-NEXT: popl %esi @@ -207,8 +207,8 @@ define void @freeze_extractelement_escape(ptr %origin0, ptr %origin1, ptr %dst, ; ; X64-LABEL: freeze_extractelement_escape: ; X64: # %bb.0: -; X64-NEXT: vmovdqa (%rsi), %xmm0 -; X64-NEXT: vpand (%rdi), %xmm0, %xmm0 +; X64-NEXT: vmovdqa (%rdi), %xmm0 +; X64-NEXT: vpand (%rsi), %xmm0, %xmm0 ; X64-NEXT: vmovdqa %xmm0, (%rcx) ; 
X64-NEXT: vpextrb $6, %xmm0, (%rdx) ; X64-NEXT: retq @@ -239,8 +239,8 @@ define void @freeze_extractelement_extra_use(ptr %origin0, ptr %origin1, i64 %id ; X86-NEXT: movl 32(%ebp), %edx ; X86-NEXT: movl 12(%ebp), %esi ; X86-NEXT: movl 8(%ebp), %edi -; X86-NEXT: vmovaps (%esi), %xmm0 -; X86-NEXT: vandps (%edi), %xmm0, %xmm0 +; X86-NEXT: vmovaps (%edi), %xmm0 +; X86-NEXT: vandps (%esi), %xmm0, %xmm0 ; X86-NEXT: vmovaps %xmm0, (%esp) ; X86-NEXT: movzbl (%esp,%ecx), %ecx ; X86-NEXT: cmpb (%esp,%eax), %cl @@ -255,8 +255,8 @@ define void @freeze_extractelement_extra_use(ptr %origin0, ptr %origin1, i64 %id ; X64: # %bb.0: ; X64-NEXT: andl $15, %ecx ; X64-NEXT: andl $15, %edx -; X64-NEXT: vmovaps (%rsi), %xmm0 -; X64-NEXT: vandps (%rdi), %xmm0, %xmm0 +; X64-NEXT: vmovaps (%rdi), %xmm0 +; X64-NEXT: vandps (%rsi), %xmm0, %xmm0 ; X64-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp) ; X64-NEXT: movzbl -24(%rsp,%rdx), %eax ; X64-NEXT: cmpb -24(%rsp,%rcx), %al diff --git a/llvm/test/CodeGen/X86/noreturn-call-win64.ll b/llvm/test/CodeGen/X86/noreturn-call-win64.ll index 57aa022..13be1f13 100644 --- a/llvm/test/CodeGen/X86/noreturn-call-win64.ll +++ b/llvm/test/CodeGen/X86/noreturn-call-win64.ll @@ -111,3 +111,15 @@ declare dso_local void @"??1MakeCleanup@@QEAA@XZ"(ptr) ; CHECK: # %unreachable ; CHECK: int3 ; CHECK: .seh_handlerdata + + +define dso_local void @last_call_no_return() { + call void @abort1() + unreachable +} + +; CHECK-LABEL: last_call_no_return: +; CHECK: callq abort1 +; CHECK-NEXT: int3 +; CHECK-NEXT: .seh_endproc + diff --git a/llvm/test/CodeGen/X86/pr149841.ll b/llvm/test/CodeGen/X86/pr149841.ll new file mode 100644 index 0000000..c17a617 --- /dev/null +++ b/llvm/test/CodeGen/X86/pr149841.ll @@ -0,0 +1,34 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc < %s | FileCheck %s + +target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128" +target triple = "x86_64-unknown-linux-gnu" + +%struct.bar = type { [5 x ptr] } + +@global = external dso_local global %struct.bar + +define i1 @foo(ptr %arg, i1 %arg1) { +; CHECK-LABEL: foo: +; CHECK: # %bb.0: # %bb +; CHECK-NEXT: cmpq $global+1, %rdi +; CHECK-NEXT: setne %al +; CHECK-NEXT: andb %sil, %al +; CHECK-NEXT: retq +bb: + #dbg_value(ptr @global, !3, !DIExpression(), !5) + %icmp = icmp ne ptr %arg, getelementptr inbounds nuw (i8, ptr @global, i64 1) + %select = select i1 %arg1, i1 %icmp, i1 false + ret i1 %select +} + +!llvm.dbg.cu = !{!0} +!llvm.module.flags = !{!2} + +!0 = distinct !DICompileUnit(language: DW_LANG_C11, file: !1, isOptimized: false, runtimeVersion: 0, emissionKind: NoDebug) +!1 = !DIFile(filename: "x.c", directory: "/proc/self/cwd") +!2 = !{i32 2, !"Debug Info Version", i32 3} +!3 = !DILocalVariable(name: "x", arg: 1, scope: !4, file: !1) +!4 = distinct !DISubprogram(name: "x", scope: null, file: !1, spFlags: DISPFlagDefinition, unit: !0) +!5 = !DILocation(line: 0, scope: !4) + diff --git a/llvm/test/CodeGen/X86/seh-catch-all.ll b/llvm/test/CodeGen/X86/seh-catch-all.ll index 5250bb9..4e25aab 100644 --- a/llvm/test/CodeGen/X86/seh-catch-all.ll +++ b/llvm/test/CodeGen/X86/seh-catch-all.ll @@ -40,7 +40,7 @@ catchall: ; CHECK-NEXT: .long (.Llsda_end0-.Llsda_begin0)/16 ; CHECK-NEXT: .Llsda_begin0: ; CHECK-NEXT: .long .Ltmp{{[0-9]+}}@IMGREL -; CHECK-NEXT: .long .Ltmp{{[0-9]+}}@IMGREL+1 +; CHECK-NEXT: .long .Ltmp{{[0-9]+}}@IMGREL ; CHECK-NEXT: .long 1 ; CHECK-NEXT: .long .LBB0_2@IMGREL ; CHECK-NEXT: .Llsda_end0: diff --git 
a/llvm/test/CodeGen/X86/seh-catchpad.ll b/llvm/test/CodeGen/X86/seh-catchpad.ll index d958580..cb85f39 100644 --- a/llvm/test/CodeGen/X86/seh-catchpad.ll +++ b/llvm/test/CodeGen/X86/seh-catchpad.ll @@ -123,23 +123,23 @@ __except.ret: ; preds = %catch.dispatch.7 ; CHECK-NEXT: .long (.Llsda_end0-.Llsda_begin0)/16 ; CHECK-NEXT: .Llsda_begin0: ; CHECK-NEXT: .long .Ltmp0@IMGREL -; CHECK-NEXT: .long .Ltmp1@IMGREL+1 +; CHECK-NEXT: .long .Ltmp1@IMGREL ; CHECK-NEXT: .long 1 ; CHECK-NEXT: .long .LBB1_[[except1bb]]@IMGREL ; CHECK-NEXT: .long .Ltmp0@IMGREL -; CHECK-NEXT: .long .Ltmp1@IMGREL+1 +; CHECK-NEXT: .long .Ltmp1@IMGREL ; CHECK-NEXT: .long "?filt$0@0@main@@"@IMGREL ; CHECK-NEXT: .long .LBB1_[[except2bb]]@IMGREL ; CHECK-NEXT: .long .Ltmp2@IMGREL -; CHECK-NEXT: .long .Ltmp3@IMGREL+1 +; CHECK-NEXT: .long .Ltmp3@IMGREL ; CHECK-NEXT: .long "?dtor$[[finbb:[0-9]+]]@?0?main@4HA"@IMGREL ; CHECK-NEXT: .long 0 ; CHECK-NEXT: .long .Ltmp2@IMGREL -; CHECK-NEXT: .long .Ltmp3@IMGREL+1 +; CHECK-NEXT: .long .Ltmp3@IMGREL ; CHECK-NEXT: .long "?filt$0@0@main@@"@IMGREL ; CHECK-NEXT: .long .LBB1_3@IMGREL ; CHECK-NEXT: .long .Ltmp6@IMGREL -; CHECK-NEXT: .long .Ltmp7@IMGREL+1 +; CHECK-NEXT: .long .Ltmp7@IMGREL ; CHECK-NEXT: .long "?filt$0@0@main@@"@IMGREL ; CHECK-NEXT: .long .LBB1_3@IMGREL ; CHECK-NEXT: .Llsda_end0: diff --git a/llvm/test/CodeGen/X86/seh-except-finally.ll b/llvm/test/CodeGen/X86/seh-except-finally.ll index 7f70655..539d776 100644 --- a/llvm/test/CodeGen/X86/seh-except-finally.ll +++ b/llvm/test/CodeGen/X86/seh-except-finally.ll @@ -83,15 +83,15 @@ __try.cont: ; preds = %__except, %invoke.c ; CHECK-NEXT: .long (.Llsda_end0-.Llsda_begin0)/16 ; CHECK-NEXT: .Llsda_begin0: ; CHECK-NEXT: .long .Ltmp0@IMGREL -; CHECK-NEXT: .long .Ltmp1@IMGREL+1 +; CHECK-NEXT: .long .Ltmp1@IMGREL ; CHECK-NEXT: .long "?dtor$2@?0?use_both@4HA"@IMGREL ; CHECK-NEXT: .long 0 ; CHECK-NEXT: .long .Ltmp0@IMGREL -; CHECK-NEXT: .long .Ltmp1@IMGREL+1 +; CHECK-NEXT: .long .Ltmp1@IMGREL ; CHECK-NEXT: .long "?filt$0@0@use_both@@"@IMGREL ; CHECK-NEXT: .long .LBB0_{{[0-9]+}}@IMGREL ; CHECK-NEXT: .long .Ltmp4@IMGREL -; CHECK-NEXT: .long .Ltmp5@IMGREL+1 +; CHECK-NEXT: .long .Ltmp5@IMGREL ; CHECK-NEXT: .long "?filt$0@0@use_both@@"@IMGREL ; CHECK-NEXT: .long .LBB0_{{[0-9]+}}@IMGREL ; CHECK-NEXT: .Llsda_end0: diff --git a/llvm/test/CodeGen/X86/seh-finally.ll b/llvm/test/CodeGen/X86/seh-finally.ll index 41823df..6093e5e 100644 --- a/llvm/test/CodeGen/X86/seh-finally.ll +++ b/llvm/test/CodeGen/X86/seh-finally.ll @@ -30,7 +30,7 @@ lpad: ; preds = %entry ; X64-NEXT: .long (.Llsda_end0-.Llsda_begin0)/16 # Number of call sites ; X64-NEXT: .Llsda_begin0: ; X64-NEXT: .long .Ltmp0@IMGREL # LabelStart -; X64-NEXT: .long .Ltmp1@IMGREL+1 # LabelEnd +; X64-NEXT: .long .Ltmp1@IMGREL # LabelEnd ; X64-NEXT: .long "?dtor$2@?0?main@4HA"@IMGREL # FinallyFunclet ; X64-NEXT: .long 0 # Null ; X64-NEXT: .Llsda_end0: diff --git a/llvm/test/CodeGen/X86/seh-safe-div.ll b/llvm/test/CodeGen/X86/seh-safe-div.ll index 542d9f6..20169f8 100644 --- a/llvm/test/CodeGen/X86/seh-safe-div.ll +++ b/llvm/test/CodeGen/X86/seh-safe-div.ll @@ -60,6 +60,7 @@ __try.cont: ; CHECK: .Ltmp0: ; CHECK: leaq [[rloc:.*\(%rbp\)]], %rcx ; CHECK: callq try_body +; CHECK: nop ; CHECK-NEXT: .Ltmp1 ; CHECK: [[cont_bb:\.LBB0_[0-9]+]]: ; CHECK: movl [[rloc]], %eax @@ -82,11 +83,11 @@ __try.cont: ; CHECK-NEXT: .long (.Llsda_end0-.Llsda_begin0)/16 ; CHECK-NEXT: .Llsda_begin0: ; CHECK-NEXT: .long .Ltmp0@IMGREL -; CHECK-NEXT: .long .Ltmp1@IMGREL+1 +; CHECK-NEXT: .long .Ltmp1@IMGREL ; CHECK-NEXT: .long 
safe_div_filt0@IMGREL ; CHECK-NEXT: .long [[handler0]]@IMGREL ; CHECK-NEXT: .long .Ltmp0@IMGREL -; CHECK-NEXT: .long .Ltmp1@IMGREL+1 +; CHECK-NEXT: .long .Ltmp1@IMGREL ; CHECK-NEXT: .long safe_div_filt1@IMGREL ; CHECK-NEXT: .long [[handler1]]@IMGREL ; CHECK-NEXT: .Llsda_end0: diff --git a/llvm/test/CodeGen/X86/seh-unwind-inline-asm-codegen.ll b/llvm/test/CodeGen/X86/seh-unwind-inline-asm-codegen.ll index 2c576df..5a6aeb6 100644 --- a/llvm/test/CodeGen/X86/seh-unwind-inline-asm-codegen.ll +++ b/llvm/test/CodeGen/X86/seh-unwind-inline-asm-codegen.ll @@ -56,8 +56,8 @@ declare dso_local void @printf(ptr, ...) ; CHECK-NEXT:$ip2state$test: ; CHECK-NEXT: .long .Lfunc_begin0@IMGREL # IP ; CHECK-NEXT: .long -1 # ToState -; CHECK-NEXT: .long .Ltmp0@IMGREL+1 # IP +; CHECK-NEXT: .long .Ltmp0@IMGREL # IP ; CHECK-NEXT: .long 0 # ToState -; CHECK-NEXT: .long .Ltmp1@IMGREL+1 # IP +; CHECK-NEXT: .long .Ltmp1@IMGREL # IP ; CHECK-NEXT: .long -1 # ToState diff --git a/llvm/test/CodeGen/X86/setcc-non-simple-type.ll b/llvm/test/CodeGen/X86/setcc-non-simple-type.ll index d2b292f..2ac2be5 100644 --- a/llvm/test/CodeGen/X86/setcc-non-simple-type.ll +++ b/llvm/test/CodeGen/X86/setcc-non-simple-type.ll @@ -119,8 +119,8 @@ define void @failing(ptr %0, ptr %1) nounwind { ; CHECK-AVX2-NEXT: .LBB0_2: # %vector.body ; CHECK-AVX2-NEXT: # Parent Loop BB0_1 Depth=1 ; CHECK-AVX2-NEXT: # => This Inner Loop Header: Depth=2 -; CHECK-AVX2-NEXT: vmovdqu 1024(%rdx,%rsi), %ymm5 -; CHECK-AVX2-NEXT: vextracti128 $1, %ymm5, %xmm6 +; CHECK-AVX2-NEXT: vmovdqu 1024(%rdx,%rsi), %xmm5 +; CHECK-AVX2-NEXT: vmovdqu 1040(%rdx,%rsi), %xmm6 ; CHECK-AVX2-NEXT: vpextrq $1, %xmm5, %rdi ; CHECK-AVX2-NEXT: vpextrq $1, %xmm6, %r8 ; CHECK-AVX2-NEXT: vmovq %xmm5, %r9 diff --git a/llvm/test/CodeGen/X86/stack-coloring-wineh.ll b/llvm/test/CodeGen/X86/stack-coloring-wineh.ll index e2de2ff..74fe07e 100644 --- a/llvm/test/CodeGen/X86/stack-coloring-wineh.ll +++ b/llvm/test/CodeGen/X86/stack-coloring-wineh.ll @@ -84,12 +84,12 @@ define void @pr66984(ptr %arg) personality ptr @__CxxFrameHandler3 { ; X86_64-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill ; X86_64-NEXT: .Ltmp0: ; X86_64-NEXT: callq throw +; X86_64-NEXT: nop ; X86_64-NEXT: .Ltmp1: ; X86_64-NEXT: # %bb.1: # %bb14 ; X86_64-NEXT: .LBB0_3: # Block address taken ; X86_64-NEXT: # %exit ; X86_64-NEXT: $ehgcr_0_3: -; X86_64-NEXT: nop ; X86_64-NEXT: .seh_startepilogue ; X86_64-NEXT: addq $64, %rsp ; X86_64-NEXT: popq %rbp diff --git a/llvm/test/CodeGen/X86/taildup-heapallocsite.ll b/llvm/test/CodeGen/X86/taildup-heapallocsite.ll index 967e125..f3bef47 100644 --- a/llvm/test/CodeGen/X86/taildup-heapallocsite.ll +++ b/llvm/test/CodeGen/X86/taildup-heapallocsite.ll @@ -37,9 +37,11 @@ cond.end: ; preds = %entry, %cond.true ; CHECK: testq ; CHECK: je ; CHECK: callq alloc +; CHECK-NEXT: nop ; CHECK-NEXT: [[L1:.Ltmp[0-9]+]] ; CHECK: jmp f2 # TAILCALL ; CHECK: callq alloc +; CHECK-NEXT: nop ; CHECK-NEXT: [[L3:.Ltmp[0-9]+]] ; CHECK: jmp f2 # TAILCALL diff --git a/llvm/test/CodeGen/X86/win-catchpad-nested-cxx.ll b/llvm/test/CodeGen/X86/win-catchpad-nested-cxx.ll index bfb9c43..0bf8370 100644 --- a/llvm/test/CodeGen/X86/win-catchpad-nested-cxx.ll +++ b/llvm/test/CodeGen/X86/win-catchpad-nested-cxx.ll @@ -103,15 +103,15 @@ handler2: ; X64: $ip2state$try_in_catch: ; X64-NEXT: .long .Lfunc_begin0@IMGREL ; X64-NEXT: .long -1 -; X64-NEXT: .long .Ltmp0@IMGREL+1 +; X64-NEXT: .long .Ltmp0@IMGREL ; X64-NEXT: .long 0 -; X64-NEXT: .long .Ltmp1@IMGREL+1 +; X64-NEXT: .long .Ltmp1@IMGREL ; X64-NEXT: .long -1 ; X64-NEXT: 
.long "?catch$2@?0?try_in_catch@4HA"@IMGREL ; X64-NEXT: .long 1 -; X64-NEXT: .long .Ltmp2@IMGREL+1 +; X64-NEXT: .long .Ltmp2@IMGREL ; X64-NEXT: .long 2 -; X64-NEXT: .long .Ltmp3@IMGREL+1 +; X64-NEXT: .long .Ltmp3@IMGREL ; X64-NEXT: .long 1 ; X64-NEXT: .long "?catch$4@?0?try_in_catch@4HA"@IMGREL ; X64-NEXT: .long 3 diff --git a/llvm/test/CodeGen/X86/win-catchpad.ll b/llvm/test/CodeGen/X86/win-catchpad.ll index 2491946..62ea510 100644 --- a/llvm/test/CodeGen/X86/win-catchpad.ll +++ b/llvm/test/CodeGen/X86/win-catchpad.ll @@ -214,9 +214,9 @@ try.cont: ; X64: $ip2state$try_catch_catch: ; X64-NEXT: .long .Lfunc_begin0@IMGREL ; X64-NEXT: .long -1 -; X64-NEXT: .long .Ltmp0@IMGREL+1 +; X64-NEXT: .long .Ltmp0@IMGREL ; X64-NEXT: .long 0 -; X64-NEXT: .long .Ltmp1@IMGREL+1 +; X64-NEXT: .long .Ltmp1@IMGREL ; X64-NEXT: .long -1 ; X64-NEXT: .long "?catch$[[catch1bb]]@?0?try_catch_catch@4HA"@IMGREL ; X64-NEXT: .long 1 @@ -357,9 +357,9 @@ try.cont: ; X64-LABEL: $ip2state$branch_to_normal_dest: ; X64-NEXT: .long .Lfunc_begin1@IMGREL ; X64-NEXT: .long -1 -; X64-NEXT: .long .Ltmp[[before_call]]@IMGREL+1 +; X64-NEXT: .long .Ltmp[[before_call]]@IMGREL ; X64-NEXT: .long 0 -; X64-NEXT: .long .Ltmp[[after_call]]@IMGREL+1 +; X64-NEXT: .long .Ltmp[[after_call]]@IMGREL ; X64-NEXT: .long -1 ; X64-NEXT: .long "?catch$[[catchbb]]@?0?branch_to_normal_dest@4HA"@IMGREL ; X64-NEXT: .long 1 diff --git a/llvm/test/CodeGen/X86/win-cleanuppad.ll b/llvm/test/CodeGen/X86/win-cleanuppad.ll index e3f7f5b..e9265a1 100644 --- a/llvm/test/CodeGen/X86/win-cleanuppad.ll +++ b/llvm/test/CodeGen/X86/win-cleanuppad.ll @@ -191,7 +191,7 @@ cleanup.outer: ; preds = %invoke.cont.1, %c ; X64-NEXT: .long 1 ; X64-NEXT: .long .Ltmp6@IMGREL ; X64-NEXT: .long 0 -; X64-NEXT: .long .Ltmp7@IMGREL+1 +; X64-NEXT: .long .Ltmp7@IMGREL ; X64-NEXT: .long -1 attributes #0 = { "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-realign-stack" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" } diff --git a/llvm/test/CodeGen/X86/win32-eh-states.ll b/llvm/test/CodeGen/X86/win32-eh-states.ll index 42ae5b0..e645199 100644 --- a/llvm/test/CodeGen/X86/win32-eh-states.ll +++ b/llvm/test/CodeGen/X86/win32-eh-states.ll @@ -86,11 +86,11 @@ catch.7: ; X64-LABEL: $ip2state$f: ; X64-NEXT: .long .Lfunc_begin0@IMGREL ; X64-NEXT: .long -1 -; X64-NEXT: .long .Ltmp{{.*}}@IMGREL+1 +; X64-NEXT: .long .Ltmp{{.*}}@IMGREL ; X64-NEXT: .long 0 -; X64-NEXT: .long .Ltmp{{.*}}@IMGREL+1 +; X64-NEXT: .long .Ltmp{{.*}}@IMGREL ; X64-NEXT: .long 1 -; X64-NEXT: .long .Ltmp{{.*}}@IMGREL+1 +; X64-NEXT: .long .Ltmp{{.*}}@IMGREL ; X64-NEXT: .long -1 ; X64-NEXT: .long "?catch${{.*}}@?0?f@4HA"@IMGREL ; X64-NEXT: .long 2 @@ -189,15 +189,15 @@ unreachable: ; preds = %entry ; X64-LABEL: $ip2state$g: ; X64-NEXT: .long .Lfunc_begin1@IMGREL ; X64-NEXT: .long -1 -; X64-NEXT: .long .Ltmp{{.*}}@IMGREL+1 +; X64-NEXT: .long .Ltmp{{.*}}@IMGREL ; X64-NEXT: .long 1 -; X64-NEXT: .long .Ltmp{{.*}}@IMGREL+1 +; X64-NEXT: .long .Ltmp{{.*}}@IMGREL ; X64-NEXT: .long -1 ; X64-NEXT: .long "?catch${{.*}}@?0?g@4HA"@IMGREL ; X64-NEXT: .long 2 -; X64-NEXT: .long .Ltmp{{.*}}@IMGREL+1 +; X64-NEXT: .long .Ltmp{{.*}}@IMGREL ; X64-NEXT: .long 3 -; X64-NEXT: .long .Ltmp{{.*}}@IMGREL+1 +; X64-NEXT: .long .Ltmp{{.*}}@IMGREL ; X64-NEXT: .long 2 diff --git a/llvm/test/CodeGen/X86/win64-seh-epilogue-statepoint.ll b/llvm/test/CodeGen/X86/win64-seh-epilogue-statepoint.ll index bc5be7a..75f156f 100644 --- 
a/llvm/test/CodeGen/X86/win64-seh-epilogue-statepoint.ll +++ b/llvm/test/CodeGen/X86/win64-seh-epilogue-statepoint.ll @@ -8,8 +8,8 @@ define i32 @foobar() gc "statepoint-example" personality ptr @__gxx_personality_ ; CHECK-NEXT: .seh_stackalloc 40 ; CHECK-NEXT: .seh_endprologue ; CHECK-NEXT: callq bar -; CHECK-NEXT: .Ltmp0: ; CHECK-NEXT: nop +; CHECK-NEXT: .Ltmp0: ; CHECK-NEXT: .seh_startepilogue ; CHECK-NEXT: addq $40, %rsp ; CHECK-NEXT: .seh_endepilogue diff --git a/llvm/test/CodeGen/X86/wineh-coreclr.ll b/llvm/test/CodeGen/X86/wineh-coreclr.ll index baf5eaa..a3d0fde 100644 --- a/llvm/test/CodeGen/X86/wineh-coreclr.ll +++ b/llvm/test/CodeGen/X86/wineh-coreclr.ll @@ -38,6 +38,7 @@ entry: ; CHECK: [[test1_before_f1:.+]]: ; CHECK-NEXT: movl $1, %ecx ; CHECK-NEXT: callq f +; CHECK-NEXT: nop ; CHECK-NEXT: [[test1_after_f1:.+]]: invoke void @f(i32 1) to label %inner_try unwind label %finally @@ -46,6 +47,7 @@ inner_try: ; CHECK: [[test1_before_f2:.+]]: ; CHECK-NEXT: movl $2, %ecx ; CHECK-NEXT: callq f +; CHECK-NEXT: nop ; CHECK-NEXT: [[test1_after_f2:.+]]: invoke void @f(i32 2) to label %finally.clone unwind label %exn.dispatch @@ -69,6 +71,7 @@ catch1: ; CHECK: [[test1_before_f3:.+]]: ; CHECK-NEXT: movl $3, %ecx ; CHECK-NEXT: callq f +; CHECK-NEXT: nop ; CHECK-NEXT: [[test1_after_f3:.+]]: invoke void @f(i32 3) [ "funclet"(token %catch.pad1) ] to label %catch1.ret unwind label %finally @@ -92,6 +95,7 @@ catch2: ; CHECK: [[test1_before_f4:.+]]: ; CHECK-NEXT: movl $4, %ecx ; CHECK-NEXT: callq f +; CHECK-NEXT: nop ; CHECK-NEXT: [[test1_after_f4:.+]]: invoke void @f(i32 4) [ "funclet"(token %catch.pad2) ] to label %try_in_catch unwind label %finally @@ -100,6 +104,7 @@ try_in_catch: ; CHECK: [[test1_before_f5:.+]]: ; CHECK-NEXT: movl $5, %ecx ; CHECK-NEXT: callq f +; CHECK-NEXT: nop ; CHECK-NEXT: [[test1_after_f5:.+]]: invoke void @f(i32 5) [ "funclet"(token %catch.pad2) ] to label %catch2.ret unwind label %fault @@ -116,6 +121,7 @@ fault: ; CHECK: [[test1_before_f6:.+]]: ; CHECK-NEXT: movl $6, %ecx ; CHECK-NEXT: callq f +; CHECK-NEXT: nop ; CHECK-NEXT: [[test1_after_f6:.+]]: invoke void @f(i32 6) [ "funclet"(token %fault.pad) ] to label %fault.ret unwind label %finally @@ -312,6 +318,7 @@ unreachable: ; CHECK: [[test2_before_f1:.+]]: ; CHECK-NEXT: movl $1, %ecx ; CHECK-NEXT: callq f +; CHECK-NEXT: nop ; CHECK-NEXT: [[test2_after_f1:.+]]: ; CHECK: .seh_proc [[test2_catch1:[^ ]+]] ; CHECK: .seh_proc [[test2_catch2:[^ ]+]] @@ -320,6 +327,7 @@ unreachable: ; CHECK: [[test2_before_f2:.+]]: ; CHECK-NEXT: movl $2, %ecx ; CHECK-NEXT: callq f +; CHECK-NEXT: nop ; CHECK-NEXT: [[test2_after_f2:.+]]: ; CHECK: int3 ; CHECK: [[test2_end:.*func_end.*]]: @@ -448,6 +456,7 @@ entry: ; CHECK: [[test3_before_f1:.+]]: ; CHECK-NEXT: movl $1, %ecx ; CHECK-NEXT: callq f +; CHECK-NEXT: nop ; CHECK-NEXT: [[test3_after_f1:.+]]: invoke void @f(i32 1) to label %exit unwind label %fault1 @@ -474,6 +483,7 @@ fault4: ; CHECK: [[test3_before_f6:.+]]: ; CHECK-NEXT: movl $6, %ecx ; CHECK-NEXT: callq f +; CHECK-NEXT: nop ; CHECK-NEXT: [[test3_after_f6:.+]]: invoke void @f(i32 6) ["funclet"(token %fault.pad4)] to label %fault4.cont unwind label %exn.dispatch1 @@ -482,6 +492,7 @@ fault4.cont: ; CHECK: [[test3_before_f7:.+]]: ; CHECK-NEXT: movl $7, %ecx ; CHECK-NEXT: callq f +; CHECK-NEXT: nop ; CHECK-NEXT: [[test3_after_f7:.+]]: invoke void @f(i32 7) ["funclet"(token %fault.pad4)] to label %unreachable unwind label %fault5 @@ -512,6 +523,7 @@ unreachable: ; CHECK: [[test3_before_f4:.+]]: ; CHECK-NEXT: movl $4, %ecx ; CHECK-NEXT: 
callq f +; CHECK-NEXT: nop ; CHECK-NEXT: [[test3_after_f4:.+]]: ; CHECK: int3 ; CHECK: .seh_proc [[test3_fault2:[^ ]+]] @@ -520,6 +532,7 @@ unreachable: ; CHECK: [[test3_before_f3:.+]]: ; CHECK-NEXT: movl $3, %ecx ; CHECK-NEXT: callq f +; CHECK-NEXT: nop ; CHECK-NEXT: [[test3_after_f3:.+]]: ; CHECK: int3 ; CHECK: .seh_proc [[test3_fault1:[^ ]+]] @@ -528,6 +541,7 @@ unreachable: ; CHECK: [[test3_before_f2:.+]]: ; CHECK-NEXT: movl $2, %ecx ; CHECK-NEXT: callq f +; CHECK-NEXT: nop ; CHECK-NEXT: [[test3_after_f2:.+]]: ; CHECK: int3 ; CHECK: [[test3_end:.*func_end.*]]: diff --git a/llvm/test/CodeGen/XCore/exception.ll b/llvm/test/CodeGen/XCore/exception.ll index f222297..bb5f3f4 100644 --- a/llvm/test/CodeGen/XCore/exception.ll +++ b/llvm/test/CodeGen/XCore/exception.ll @@ -60,7 +60,7 @@ entry: ; CHECK: [[PRE_G:.L[a-zA-Z0-9_]+]] ; CHECK: bl g ; CHECK: [[POST_G:.L[a-zA-Z0-9_]+]] -; CHECK: [[RETURN:.L[a-zA-Z0-9_]+]] +; CHECK: [[RETURN:^.L[a-zA-Z0-9_]+]] ; CHECK: ldw r6, sp[1] ; CHECK: ldw r5, sp[2] ; CHECK: ldw r4, sp[3] diff --git a/llvm/test/DebugInfo/Generic/mixed-source.ll b/llvm/test/DebugInfo/Generic/mixed-source.ll index d5586f8..ee3598f 100644 --- a/llvm/test/DebugInfo/Generic/mixed-source.ll +++ b/llvm/test/DebugInfo/Generic/mixed-source.ll @@ -5,36 +5,66 @@ ; CHECK: include_directories[ 0] = "dir" ; CHECK-NEXT: file_names[ 0]: +; CHECK-NEXT: name: "main.c" +; CHECK-NEXT: dir_index: 0 +; CHECK-NOT: source: +; CHECK-NEXT: file_names[ 1]: ; CHECK-NEXT: name: "foo.c" ; CHECK-NEXT: dir_index: 0 ; CHECK-NEXT: source: "void foo() { }\n" -; CHECK-NEXT: file_names[ 1]: -; CHECK-NEXT: name: "bar.h" +; CHECK-NEXT: file_names[ 2]: +; CHECK-NEXT: name: "newline.h" +; CHECK-NEXT: dir_index: 0 +; CHECK-NEXT: source: "\n" +; CHECK-NEXT: file_names[ 3]: +; CHECK-NEXT: name: "empty.h" +; CHECK-NEXT: dir_index: 0 +; CHECK-NEXT: source: "\n" +; CHECK-NEXT: file_names[ 4]: +; CHECK-NEXT: name: "absent.h" ; CHECK-NEXT: dir_index: 0 ; CHECK-NOT: source: ; Test that DIFiles mixing source and no-source within a DICompileUnit works. 
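; A minimal sketch of the two DIFile forms this test exercises (illustrative
; only; the real metadata is in the hunk below): a file with embedded source,
;   !DIFile(filename: "foo.c", directory: "dir", source: "void foo() { }\0A")
; and one without the optional `source:` field,
;   !DIFile(filename: "absent.h", directory: "dir")
; both attached to subprograms of the same DICompileUnit.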
-define dso_local void @foo() !dbg !5 { +define dso_local void @foo() !dbg !6 { ret void, !dbg !7 } -define dso_local void @bar() !dbg !6 { - ret void, !dbg !8 +define dso_local void @newline() !dbg !9 { + ret void, !dbg !10 } -!llvm.dbg.cu = !{!4} +define dso_local void @empty() !dbg !12 { + ret void, !dbg !13 +} + +define dso_local void @absent() !dbg !15 { + ret void, !dbg !16 +} + +!llvm.dbg.cu = !{!2} !llvm.module.flags = !{!0, !1} !0 = !{i32 2, !"Dwarf Version", i32 5} !1 = !{i32 2, !"Debug Info Version", i32 3} -!2 = !DIFile(filename: "foo.c", directory: "dir", source: "void foo() { }\0A") -!3 = !DIFile(filename: "bar.h", directory: "dir") +!2 = distinct !DICompileUnit(language: DW_LANG_C99, emissionKind: FullDebug, file: !4) +!3 = !DISubroutineType(types: !{}) +!4 = !DIFile(filename: "main.c", directory: "dir") + +!5 = !DIFile(filename: "foo.c", directory: "dir", source: "void foo() { }\0A") +!6 = distinct !DISubprogram(name: "foo", file: !5, line: 1, type: !3, scopeLine: 1, spFlags: DISPFlagDefinition, unit: !2) +!7 = !DILocation(line: 1, scope: !6) + +!8 = !DIFile(filename: "newline.h", directory: "dir", source: "\0A") +!9 = distinct !DISubprogram(name: "newline", file: !8, line: 1, type: !3, scopeLine: 1, spFlags: DISPFlagDefinition, unit: !2) +!10 = !DILocation(line: 1, scope: !9) + +!11 = !DIFile(filename: "empty.h", directory: "dir", source: "") +!12 = distinct !DISubprogram(name: "empty", file: !11, line: 1, type: !3, scopeLine: 1, spFlags: DISPFlagDefinition, unit: !2) +!13 = !DILocation(line: 1, scope: !12) -!4 = distinct !DICompileUnit(language: DW_LANG_C99, emissionKind: FullDebug, file: !2) -!5 = distinct !DISubprogram(name: "foo", file: !2, line: 1, type: !9, scopeLine: 1, spFlags: DISPFlagDefinition, unit: !4) -!6 = distinct !DISubprogram(name: "bar", file: !3, line: 1, type: !9, scopeLine: 1, spFlags: DISPFlagDefinition, unit: !4) -!7 = !DILocation(line: 1, scope: !5) -!8 = !DILocation(line: 1, scope: !6) -!9 = !DISubroutineType(types: !{}) +!14 = !DIFile(filename: "absent.h", directory: "dir") +!15 = distinct !DISubprogram(name: "absent", file: !14, line: 1, type: !3, scopeLine: 1, spFlags: DISPFlagDefinition, unit: !2) +!16 = !DILocation(line: 1, scope: !15) diff --git a/llvm/test/MC/AMDGPU/gfx1250_asm_smem.s b/llvm/test/MC/AMDGPU/gfx1250_asm_smem.s index 899c4c7..800f662 100644 --- a/llvm/test/MC/AMDGPU/gfx1250_asm_smem.s +++ b/llvm/test/MC/AMDGPU/gfx1250_asm_smem.s @@ -12,3 +12,30 @@ s_buffer_load_i8 s5, s[4:7], s0 nv // GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: nv is not supported on this GPU // GFX12-ERR-NEXT:{{^}}s_buffer_load_i8 s5, s[4:7], s0 nv // GFX12-ERR-NEXT:{{^}} ^ + +s_load_b32 s4, s[2:3], 0xa scale_offset +// GFX1250: s_load_b32 s4, s[2:3], 0xa scale_offset ; encoding: [0x01,0x01,0x00,0xf4,0x0a,0x00,0x00,0xf9] +// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: scale_offset is not supported on this GPU +// GFX12-ERR-NEXT:{{^}}s_load_b32 s4, s[2:3], 0xa scale_offset +// GFX12-ERR-NEXT:{{^}} ^ + +s_load_b32 s4, s[2:3], 0xa scale_offset nv +// GFX1250: s_load_b32 s4, s[2:3], 0xa scale_offset nv ; encoding: [0x01,0x01,0x10,0xf4,0x0a,0x00,0x00,0xf9] +// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: scale_offset is not supported on this GPU +// GFX12-ERR-NEXT:{{^}}s_load_b32 s4, s[2:3], 0xa scale_offset nv +// GFX12-ERR-NEXT:{{^}} ^ +// GFX12-ERR-NEXT: error: nv is not supported on this GPU +// GFX12-ERR-NEXT:{{^}}s_load_b32 s4, s[2:3], 0xa scale_offset nv +// GFX12-ERR-NEXT:{{^}} ^ + +s_load_b32 s4, s[2:3], s5 offset:32 scale_offset +// GFX1250: s_load_b32 s4, 
s[2:3], s5 offset:0x20 scale_offset ; encoding: [0x01,0x01,0x00,0xf4,0x20,0x00,0x00,0x0b] +// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: scale_offset is not supported on this GPU +// GFX12-ERR-NEXT:{{^}}s_load_b32 s4, s[2:3], s5 offset:32 scale_offset +// GFX12-ERR-NEXT:{{^}} ^ + +s_load_b32 s4, s[2:3], m0 offset:32 scale_offset +// GFX1250: s_load_b32 s4, s[2:3], m0 offset:0x20 scale_offset ; encoding: [0x01,0x01,0x00,0xf4,0x20,0x00,0x00,0xfb] +// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: scale_offset is not supported on this GPU +// GFX12-ERR-NEXT:{{^}}s_load_b32 s4, s[2:3], m0 offset:32 scale_offset +// GFX12-ERR-NEXT:{{^}} ^ diff --git a/llvm/test/MC/AMDGPU/gfx1250_asm_smem_err.s b/llvm/test/MC/AMDGPU/gfx1250_asm_smem_err.s new file mode 100644 index 0000000..e57d4fc76 --- /dev/null +++ b/llvm/test/MC/AMDGPU/gfx1250_asm_smem_err.s @@ -0,0 +1,16 @@ +// RUN: not llvm-mc -triple=amdgcn -mcpu=gfx1250 -show-encoding %s 2>&1 | FileCheck --check-prefix=GFX1250-ERR --implicit-check-not=error: --strict-whitespace %s + +s_buffer_load_i8 s5, s[4:7], s0 scale_offset +// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: scale_offset is not supported for this instruction +// GFX1250-ERR-NEXT:{{^}}s_buffer_load_i8 s5, s[4:7], s0 scale_offset +// GFX1250-ERR-NEXT:{{^}} ^ + +s_prefetch_data s[18:19], 100, s10, 7 nv +// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: invalid operand for instruction +// GFX1250-ERR-NEXT:{{^}}s_prefetch_data s[18:19], 100, s10, 7 nv +// GFX1250-ERR-NEXT:{{^}} ^ + +s_prefetch_data s[18:19], 100, s10, 7 scale_offset +// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: invalid operand for instruction +// GFX1250-ERR-NEXT:{{^}}s_prefetch_data s[18:19], 100, s10, 7 scale_offset +// GFX1250-ERR-NEXT:{{^}} ^ diff --git a/llvm/test/MC/AMDGPU/gfx1250_asm_vbuffer_mubuf_err.s b/llvm/test/MC/AMDGPU/gfx1250_asm_vbuffer_mubuf_err.s new file mode 100644 index 0000000..731eb67 --- /dev/null +++ b/llvm/test/MC/AMDGPU/gfx1250_asm_vbuffer_mubuf_err.s @@ -0,0 +1,6 @@ +// RUN: not llvm-mc -triple=amdgcn -mcpu=gfx1250 -show-encoding %s 2>&1 | FileCheck --check-prefix=GFX1250-ERR --implicit-check-not=error: --strict-whitespace %s + +buffer_load_b32 v5, v1, s[8:11], s3 offen offset:4095 scale_offset +// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: scale_offset is not supported for this instruction +// GFX1250-ERR-NEXT:{{^}}buffer_load_b32 v5, v1, s[8:11], s3 offen offset:4095 scale_offset +// GFX1250-ERR-NEXT:{{^}} ^ diff --git a/llvm/test/MC/AMDGPU/gfx1250_asm_vflat.s b/llvm/test/MC/AMDGPU/gfx1250_asm_vflat.s index 488040e..d3a49f2 100644 --- a/llvm/test/MC/AMDGPU/gfx1250_asm_vflat.s +++ b/llvm/test/MC/AMDGPU/gfx1250_asm_vflat.s @@ -61,6 +61,54 @@ scratch_load_b32 v5, v2, off nv // GFX12-ERR-NEXT:{{^}}scratch_load_b32 v5, v2, off nv // GFX12-ERR-NEXT:{{^}} ^ +global_load_b32 v5, v1, s[2:3] offset:32 scale_offset +// GFX1250: global_load_b32 v5, v1, s[2:3] offset:32 scale_offset ; encoding: [0x02,0x00,0x05,0xee,0x05,0x00,0x01,0x00,0x01,0x20,0x00,0x00] +// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: scale_offset is not supported on this GPU +// GFX12-ERR-NEXT:{{^}}global_load_b32 v5, v1, s[2:3] offset:32 scale_offset +// GFX12-ERR-NEXT:{{^}} ^ + +global_store_b32 v5, v1, s[2:3] offset:32 scale_offset +// GFX1250: global_store_b32 v5, v1, s[2:3] offset:32 scale_offset ; encoding: [0x02,0x80,0x06,0xee,0x00,0x00,0x81,0x00,0x05,0x20,0x00,0x00] +// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: scale_offset is not supported on this GPU +// GFX12-ERR-NEXT:{{^}}global_store_b32 v5, v1, s[2:3] offset:32 scale_offset +// 
GFX12-ERR-NEXT:{{^}} ^ + +global_atomic_add_u32 v2, v5, s[2:3] scale_offset +// GFX1250: global_atomic_add_u32 v2, v5, s[2:3] scale_offset ; encoding: [0x02,0x40,0x0d,0xee,0x00,0x00,0x81,0x02,0x02,0x00,0x00,0x00] +// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: scale_offset is not supported on this GPU +// GFX12-ERR-NEXT:{{^}}global_atomic_add_u32 v2, v5, s[2:3] scale_offset +// GFX12-ERR-NEXT:{{^}} ^ + +scratch_load_b32 v5, v2, off scale_offset +// GFX1250: scratch_load_b32 v5, v2, off scale_offset ; encoding: [0x7c,0x00,0x05,0xed,0x05,0x00,0x03,0x00,0x02,0x00,0x00,0x00] +// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: scale_offset is not supported on this GPU +// GFX12-ERR-NEXT:{{^}}scratch_load_b32 v5, v2, off scale_offset +// GFX12-ERR-NEXT:{{^}} ^ + +scratch_load_b32 v5, v2, off offset:32 scale_offset +// GFX1250: scratch_load_b32 v5, v2, off offset:32 scale_offset ; encoding: [0x7c,0x00,0x05,0xed,0x05,0x00,0x03,0x00,0x02,0x20,0x00,0x00] +// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: scale_offset is not supported on this GPU +// GFX12-ERR-NEXT:{{^}}scratch_load_b32 v5, v2, off offset:32 scale_offset +// GFX12-ERR-NEXT:{{^}} ^ + +scratch_load_b32 v5, v2, s1 offset:32 scale_offset +// GFX1250: scratch_load_b32 v5, v2, s1 offset:32 scale_offset ; encoding: [0x01,0x00,0x05,0xed,0x05,0x00,0x03,0x00,0x02,0x20,0x00,0x00] +// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: scale_offset is not supported on this GPU +// GFX12-ERR-NEXT:{{^}}scratch_load_b32 v5, v2, s1 offset:32 scale_offset +// GFX12-ERR-NEXT:{{^}} ^ + +scratch_store_b32 v2, v5, off scale_offset +// GFX1250: scratch_store_b32 v2, v5, off scale_offset ; encoding: [0x7c,0x80,0x06,0xed,0x00,0x00,0x83,0x02,0x02,0x00,0x00,0x00] +// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: scale_offset is not supported on this GPU +// GFX12-ERR-NEXT:{{^}}scratch_store_b32 v2, v5, off scale_offset +// GFX12-ERR-NEXT:{{^}} ^ + +scratch_store_b32 v2, v5, s1 scale_offset +// GFX1250: scratch_store_b32 v2, v5, s1 scale_offset ; encoding: [0x01,0x80,0x06,0xed,0x00,0x00,0x83,0x02,0x02,0x00,0x00,0x00] +// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: scale_offset is not supported on this GPU +// GFX12-ERR-NEXT:{{^}}scratch_store_b32 v2, v5, s1 scale_offset +// GFX12-ERR-NEXT:{{^}} ^ + tensor_save s[0:1] // GFX1250: tensor_save s[0:1] ; encoding: [0x00,0x80,0x1b,0xee,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00] // GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU @@ -81,10 +129,18 @@ tensor_stop th:TH_STORE_BYPASS scope:SCOPE_SYS // GFX1250: tensor_stop th:TH_STORE_BYPASS scope:SCOPE_SYS ; encoding: [0x7c,0xc0,0x1b,0xee,0x00,0x00,0x3c,0x00,0x00,0x00,0x00,0x00] // GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU +flat_atomic_add_f32 v1, v2, s[2:3] offset:8000000 scale_offset th:TH_ATOMIC_RETURN +// GFX1250: flat_atomic_add_f32 v1, v2, s[2:3] offset:8000000 scale_offset th:TH_ATOMIC_RETURN ; encoding: [0x02,0x80,0x15,0xec,0x00,0x00,0x11,0x01,0x01,0x00,0x12,0x7a] +// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: operands are not valid for this GPU or mode + flat_atomic_add_f32 v2, v3, s[2:3] offset:64 // GFX1250: flat_atomic_add_f32 v2, v3, s[2:3] offset:64 ; encoding: [0x02,0x80,0x15,0xec,0x00,0x00,0x80,0x01,0x02,0x40,0x00,0x00] // GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: operands are not valid for this GPU or mode +flat_atomic_add_u32 v1, v2, s[2:3] offset:-64 scale_offset th:TH_ATOMIC_RETURN +// GFX1250: flat_atomic_add_u32 v1, v2, s[2:3] offset:-64 scale_offset th:TH_ATOMIC_RETURN ; encoding: 
[0x02,0x40,0x0d,0xec,0x00,0x00,0x11,0x01,0x01,0xc0,0xff,0xff] +// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: operands are not valid for this GPU or mode + flat_atomic_add_u32 v2, v3, s[2:3] offset:-64 // GFX1250: flat_atomic_add_u32 v2, v3, s[2:3] offset:-64 ; encoding: [0x02,0x40,0x0d,0xec,0x00,0x00,0x80,0x01,0x02,0xc0,0xff,0xff] // GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: operands are not valid for this GPU or mode @@ -93,6 +149,14 @@ flat_atomic_add_u64 v2, v[2:3], s[2:3] offset:64 // GFX1250: flat_atomic_add_u64 v2, v[2:3], s[2:3] offset:64 ; encoding: [0x02,0xc0,0x10,0xec,0x00,0x00,0x00,0x01,0x02,0x40,0x00,0x00] // GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: operands are not valid for this GPU or mode +flat_atomic_add_u64 v[0:1], v2, v[2:3], s[2:3] offset:-64 scale_offset th:TH_ATOMIC_RETURN +// GFX1250: flat_atomic_add_u64 v[0:1], v2, v[2:3], s[2:3] offset:-64 scale_offset th:TH_ATOMIC_RETURN ; encoding: [0x02,0xc0,0x10,0xec,0x00,0x00,0x11,0x01,0x02,0xc0,0xff,0xff] +// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: not a valid operand. + +flat_atomic_and_b32 v1, v2, s[2:3] offset:64 scale_offset th:TH_ATOMIC_RETURN +// GFX1250: flat_atomic_and_b32 v1, v2, s[2:3] offset:64 scale_offset th:TH_ATOMIC_RETURN ; encoding: [0x02,0x00,0x0f,0xec,0x00,0x00,0x11,0x01,0x01,0x40,0x00,0x00] +// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: operands are not valid for this GPU or mode + flat_atomic_and_b32 v2, v3, s[2:3] offset:64 // GFX1250: flat_atomic_and_b32 v2, v3, s[2:3] offset:64 ; encoding: [0x02,0x00,0x0f,0xec,0x00,0x00,0x80,0x01,0x02,0x40,0x00,0x00] // GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: operands are not valid for this GPU or mode @@ -101,18 +165,38 @@ flat_atomic_and_b64 v2, v[2:3], s[2:3] offset:64 // GFX1250: flat_atomic_and_b64 v2, v[2:3], s[2:3] offset:64 ; encoding: [0x02,0x40,0x12,0xec,0x00,0x00,0x00,0x01,0x02,0x40,0x00,0x00] // GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: operands are not valid for this GPU or mode +flat_atomic_and_b64 v[0:1], v2, v[2:3], s[2:3] offset:-64 scale_offset th:TH_ATOMIC_RETURN +// GFX1250: flat_atomic_and_b64 v[0:1], v2, v[2:3], s[2:3] offset:-64 scale_offset th:TH_ATOMIC_RETURN ; encoding: [0x02,0x40,0x12,0xec,0x00,0x00,0x11,0x01,0x02,0xc0,0xff,0xff] +// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: not a valid operand. + +flat_atomic_cmpswap_b32 v0, v2, v[2:3], s[2:3] scale_offset th:TH_ATOMIC_RETURN +// GFX1250: flat_atomic_cmpswap_b32 v0, v2, v[2:3], s[2:3] scale_offset th:TH_ATOMIC_RETURN ; encoding: [0x02,0x00,0x0d,0xec,0x00,0x00,0x11,0x01,0x02,0x00,0x00,0x00] +// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: operands are not valid for this GPU or mode + flat_atomic_cmpswap_b32 v2, v[2:3], s[2:3] offset:64 // GFX1250: flat_atomic_cmpswap_b32 v2, v[2:3], s[2:3] offset:64 ; encoding: [0x02,0x00,0x0d,0xec,0x00,0x00,0x00,0x01,0x02,0x40,0x00,0x00] // GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: operands are not valid for this GPU or mode +flat_atomic_cmpswap_b64 v[0:1], v2, v[2:5], s[2:3] offset:64 scale_offset th:TH_ATOMIC_RETURN +// GFX1250: flat_atomic_cmpswap_b64 v[0:1], v2, v[2:5], s[2:3] offset:64 scale_offset th:TH_ATOMIC_RETURN ; encoding: [0x02,0x80,0x10,0xec,0x00,0x00,0x11,0x01,0x02,0x40,0x00,0x00] +// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: not a valid operand. 
+ flat_atomic_cmpswap_b64 v2, v[2:5], s[2:3] // GFX1250: flat_atomic_cmpswap_b64 v2, v[2:5], s[2:3] ; encoding: [0x02,0x80,0x10,0xec,0x00,0x00,0x00,0x01,0x02,0x00,0x00,0x00] // GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: operands are not valid for this GPU or mode +flat_atomic_cond_sub_u32 v1, v2, s[2:3] offset:64 scale_offset th:TH_ATOMIC_RETURN +// GFX1250: flat_atomic_cond_sub_u32 v1, v2, s[2:3] offset:64 scale_offset th:TH_ATOMIC_RETURN ; encoding: [0x02,0x00,0x14,0xec,0x00,0x00,0x11,0x01,0x01,0x40,0x00,0x00] +// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: operands are not valid for this GPU or mode + flat_atomic_cond_sub_u32 v2, v3, s[2:3] offset:64 // GFX1250: flat_atomic_cond_sub_u32 v2, v3, s[2:3] offset:64 ; encoding: [0x02,0x00,0x14,0xec,0x00,0x00,0x80,0x01,0x02,0x40,0x00,0x00] // GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: operands are not valid for this GPU or mode +flat_atomic_dec_u32 v1, v2, s[2:3] offset:64 scale_offset th:TH_ATOMIC_RETURN +// GFX1250: flat_atomic_dec_u32 v1, v2, s[2:3] offset:64 scale_offset th:TH_ATOMIC_RETURN ; encoding: [0x02,0x00,0x10,0xec,0x00,0x00,0x11,0x01,0x01,0x40,0x00,0x00] +// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: operands are not valid for this GPU or mode + flat_atomic_dec_u32 v2, v3, s[2:3] offset:64 // GFX1250: flat_atomic_dec_u32 v2, v3, s[2:3] offset:64 ; encoding: [0x02,0x00,0x10,0xec,0x00,0x00,0x80,0x01,0x02,0x40,0x00,0x00] // GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: operands are not valid for this GPU or mode @@ -121,6 +205,14 @@ flat_atomic_dec_u64 v2, v[2:3], s[2:3] offset:64 // GFX1250: flat_atomic_dec_u64 v2, v[2:3], s[2:3] offset:64 ; encoding: [0x02,0x40,0x13,0xec,0x00,0x00,0x00,0x01,0x02,0x40,0x00,0x00] // GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: operands are not valid for this GPU or mode +flat_atomic_dec_u64 v[0:1], v2, v[2:3], s[2:3] offset:-64 scale_offset th:TH_ATOMIC_RETURN +// GFX1250: flat_atomic_dec_u64 v[0:1], v2, v[2:3], s[2:3] offset:-64 scale_offset th:TH_ATOMIC_RETURN ; encoding: [0x02,0x40,0x13,0xec,0x00,0x00,0x11,0x01,0x02,0xc0,0xff,0xff] +// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: not a valid operand. + +flat_atomic_inc_u32 v1, v2, s[2:3] offset:64 scale_offset th:TH_ATOMIC_RETURN +// GFX1250: flat_atomic_inc_u32 v1, v2, s[2:3] offset:64 scale_offset th:TH_ATOMIC_RETURN ; encoding: [0x02,0xc0,0x0f,0xec,0x00,0x00,0x11,0x01,0x01,0x40,0x00,0x00] +// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: operands are not valid for this GPU or mode + flat_atomic_inc_u32 v2, v3, s[2:3] offset:64 // GFX1250: flat_atomic_inc_u32 v2, v3, s[2:3] offset:64 ; encoding: [0x02,0xc0,0x0f,0xec,0x00,0x00,0x80,0x01,0x02,0x40,0x00,0x00] // GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: operands are not valid for this GPU or mode @@ -129,10 +221,22 @@ flat_atomic_inc_u64 v2, v[2:3], s[2:3] offset:64 // GFX1250: flat_atomic_inc_u64 v2, v[2:3], s[2:3] offset:64 ; encoding: [0x02,0x00,0x13,0xec,0x00,0x00,0x00,0x01,0x02,0x40,0x00,0x00] // GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: operands are not valid for this GPU or mode +flat_atomic_inc_u64 v[0:1], v2, v[2:3], s[2:3] offset:64 scale_offset th:TH_ATOMIC_RETURN +// GFX1250: flat_atomic_inc_u64 v[0:1], v2, v[2:3], s[2:3] offset:64 scale_offset th:TH_ATOMIC_RETURN ; encoding: [0x02,0x00,0x13,0xec,0x00,0x00,0x11,0x01,0x02,0x40,0x00,0x00] +// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: not a valid operand. 
+ +flat_atomic_max_num_f32 v1, v2, s[2:3] offset:64 scale_offset th:TH_ATOMIC_RETURN +// GFX1250: flat_atomic_max_num_f32 v1, v2, s[2:3] offset:64 scale_offset th:TH_ATOMIC_RETURN ; encoding: [0x02,0x80,0x14,0xec,0x00,0x00,0x11,0x01,0x01,0x40,0x00,0x00] +// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: operands are not valid for this GPU or mode + flat_atomic_max_num_f32 v2, v3, s[2:3] offset:64 // GFX1250: flat_atomic_max_num_f32 v2, v3, s[2:3] offset:64 ; encoding: [0x02,0x80,0x14,0xec,0x00,0x00,0x80,0x01,0x02,0x40,0x00,0x00] // GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: operands are not valid for this GPU or mode +flat_atomic_max_i32 v1, v2, s[2:3] offset:64 scale_offset th:TH_ATOMIC_RETURN +// GFX1250: flat_atomic_max_i32 v1, v2, s[2:3] offset:64 scale_offset th:TH_ATOMIC_RETURN ; encoding: [0x02,0x80,0x0e,0xec,0x00,0x00,0x11,0x01,0x01,0x40,0x00,0x00] +// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: operands are not valid for this GPU or mode + flat_atomic_max_i32 v2, v3, s[2:3] offset:64 // GFX1250: flat_atomic_max_i32 v2, v3, s[2:3] offset:64 ; encoding: [0x02,0x80,0x0e,0xec,0x00,0x00,0x80,0x01,0x02,0x40,0x00,0x00] // GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: operands are not valid for this GPU or mode @@ -141,6 +245,14 @@ flat_atomic_max_i64 v2, v[2:3], s[2:3] offset:64 // GFX1250: flat_atomic_max_i64 v2, v[2:3], s[2:3] offset:64 ; encoding: [0x02,0xc0,0x11,0xec,0x00,0x00,0x00,0x01,0x02,0x40,0x00,0x00] // GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: operands are not valid for this GPU or mode +flat_atomic_max_i64 v[0:1], v2, v[2:3], s[2:3] offset:64 scale_offset th:TH_ATOMIC_RETURN +// GFX1250: flat_atomic_max_i64 v[0:1], v2, v[2:3], s[2:3] offset:64 scale_offset th:TH_ATOMIC_RETURN ; encoding: [0x02,0xc0,0x11,0xec,0x00,0x00,0x11,0x01,0x02,0x40,0x00,0x00] +// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: not a valid operand. + +flat_atomic_max_u32 v1, v2, s[2:3] offset:64 scale_offset th:TH_ATOMIC_RETURN +// GFX1250: flat_atomic_max_u32 v1, v2, s[2:3] offset:64 scale_offset th:TH_ATOMIC_RETURN ; encoding: [0x02,0xc0,0x0e,0xec,0x00,0x00,0x11,0x01,0x01,0x40,0x00,0x00] +// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: operands are not valid for this GPU or mode + flat_atomic_max_u32 v2, v3, s[2:3] offset:64 // GFX1250: flat_atomic_max_u32 v2, v3, s[2:3] offset:64 ; encoding: [0x02,0xc0,0x0e,0xec,0x00,0x00,0x80,0x01,0x02,0x40,0x00,0x00] // GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: operands are not valid for this GPU or mode @@ -149,10 +261,22 @@ flat_atomic_max_u64 v2, v[2:3], s[2:3] offset:64 // GFX1250: flat_atomic_max_u64 v2, v[2:3], s[2:3] offset:64 ; encoding: [0x02,0x00,0x12,0xec,0x00,0x00,0x00,0x01,0x02,0x40,0x00,0x00] // GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: operands are not valid for this GPU or mode +flat_atomic_max_u64 v[0:1], v2, v[2:3], s[2:3] offset:64 scale_offset th:TH_ATOMIC_RETURN +// GFX1250: flat_atomic_max_u64 v[0:1], v2, v[2:3], s[2:3] offset:64 scale_offset th:TH_ATOMIC_RETURN ; encoding: [0x02,0x00,0x12,0xec,0x00,0x00,0x11,0x01,0x02,0x40,0x00,0x00] +// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: not a valid operand. 
+ +flat_atomic_min_num_f32 v1, v2, s[2:3] offset:64 scale_offset th:TH_ATOMIC_RETURN +// GFX1250: flat_atomic_min_num_f32 v1, v2, s[2:3] offset:64 scale_offset th:TH_ATOMIC_RETURN ; encoding: [0x02,0x40,0x14,0xec,0x00,0x00,0x11,0x01,0x01,0x40,0x00,0x00] +// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: operands are not valid for this GPU or mode + flat_atomic_min_num_f32 v2, v3, s[2:3] offset:64 // GFX1250: flat_atomic_min_num_f32 v2, v3, s[2:3] offset:64 ; encoding: [0x02,0x40,0x14,0xec,0x00,0x00,0x80,0x01,0x02,0x40,0x00,0x00] // GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: operands are not valid for this GPU or mode +flat_atomic_min_i32 v1, v2, s[2:3] offset:64 scale_offset th:TH_ATOMIC_RETURN +// GFX1250: flat_atomic_min_i32 v1, v2, s[2:3] offset:64 scale_offset th:TH_ATOMIC_RETURN ; encoding: [0x02,0x00,0x0e,0xec,0x00,0x00,0x11,0x01,0x01,0x40,0x00,0x00] +// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: operands are not valid for this GPU or mode + flat_atomic_min_i32 v2, v3, s[2:3] offset:64 // GFX1250: flat_atomic_min_i32 v2, v3, s[2:3] offset:64 ; encoding: [0x02,0x00,0x0e,0xec,0x00,0x00,0x80,0x01,0x02,0x40,0x00,0x00] // GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: operands are not valid for this GPU or mode @@ -161,6 +285,14 @@ flat_atomic_min_i64 v2, v[2:3], s[2:3] offset:64 // GFX1250: flat_atomic_min_i64 v2, v[2:3], s[2:3] offset:64 ; encoding: [0x02,0x40,0x11,0xec,0x00,0x00,0x00,0x01,0x02,0x40,0x00,0x00] // GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: operands are not valid for this GPU or mode +flat_atomic_min_i64 v[0:1], v2, v[2:3], s[2:3] offset:64 scale_offset th:TH_ATOMIC_RETURN +// GFX1250: flat_atomic_min_i64 v[0:1], v2, v[2:3], s[2:3] offset:64 scale_offset th:TH_ATOMIC_RETURN ; encoding: [0x02,0x40,0x11,0xec,0x00,0x00,0x11,0x01,0x02,0x40,0x00,0x00] +// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: not a valid operand. + +flat_atomic_min_u32 v1, v2, s[2:3] offset:64 scale_offset th:TH_ATOMIC_RETURN +// GFX1250: flat_atomic_min_u32 v1, v2, s[2:3] offset:64 scale_offset th:TH_ATOMIC_RETURN ; encoding: [0x02,0x40,0x0e,0xec,0x00,0x00,0x11,0x01,0x01,0x40,0x00,0x00] +// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: operands are not valid for this GPU or mode + flat_atomic_min_u32 v2, v3, s[2:3] offset:64 // GFX1250: flat_atomic_min_u32 v2, v3, s[2:3] offset:64 ; encoding: [0x02,0x40,0x0e,0xec,0x00,0x00,0x80,0x01,0x02,0x40,0x00,0x00] // GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: operands are not valid for this GPU or mode @@ -169,6 +301,14 @@ flat_atomic_min_u64 v2, v[2:3], s[2:3] offset:64 // GFX1250: flat_atomic_min_u64 v2, v[2:3], s[2:3] offset:64 ; encoding: [0x02,0x80,0x11,0xec,0x00,0x00,0x00,0x01,0x02,0x40,0x00,0x00] // GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: operands are not valid for this GPU or mode +flat_atomic_min_u64 v[0:1], v2, v[2:3], s[2:3] offset:64 scale_offset th:TH_ATOMIC_RETURN +// GFX1250: flat_atomic_min_u64 v[0:1], v2, v[2:3], s[2:3] offset:64 scale_offset th:TH_ATOMIC_RETURN ; encoding: [0x02,0x80,0x11,0xec,0x00,0x00,0x11,0x01,0x02,0x40,0x00,0x00] +// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: not a valid operand. 
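// Reader annotation: the returning 64-bit atomics spell out the full operand
// list (destination VGPR pair, address VGPR, data VGPR pair, SGPR base), e.g.
// v[0:1], v2, v[2:3], s[2:3]. Note the different GFX12 diagnostic for these:
// "not a valid operand." instead of "operands are not valid for this GPU or
// mode", presumably because the four-operand syntax does not parse at all on
// GFX12.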
+ +flat_atomic_or_b32 v1, v2, s[2:3] offset:64 scale_offset th:TH_ATOMIC_RETURN +// GFX1250: flat_atomic_or_b32 v1, v2, s[2:3] offset:64 scale_offset th:TH_ATOMIC_RETURN ; encoding: [0x02,0x40,0x0f,0xec,0x00,0x00,0x11,0x01,0x01,0x40,0x00,0x00] +// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: operands are not valid for this GPU or mode + flat_atomic_or_b32 v2, v3, s[2:3] offset:64 // GFX1250: flat_atomic_or_b32 v2, v3, s[2:3] offset:64 ; encoding: [0x02,0x40,0x0f,0xec,0x00,0x00,0x80,0x01,0x02,0x40,0x00,0x00] // GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: operands are not valid for this GPU or mode @@ -177,10 +317,22 @@ flat_atomic_or_b64 v2, v[2:3], s[2:3] offset:64 // GFX1250: flat_atomic_or_b64 v2, v[2:3], s[2:3] offset:64 ; encoding: [0x02,0x80,0x12,0xec,0x00,0x00,0x00,0x01,0x02,0x40,0x00,0x00] // GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: operands are not valid for this GPU or mode +flat_atomic_or_b64 v[0:1], v2, v[2:3], s[2:3] offset:64 scale_offset th:TH_ATOMIC_RETURN +// GFX1250: flat_atomic_or_b64 v[0:1], v2, v[2:3], s[2:3] offset:64 scale_offset th:TH_ATOMIC_RETURN ; encoding: [0x02,0x80,0x12,0xec,0x00,0x00,0x11,0x01,0x02,0x40,0x00,0x00] +// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: not a valid operand. + +flat_atomic_sub_clamp_u32 v1, v2, s[2:3] offset:64 scale_offset th:TH_ATOMIC_RETURN +// GFX1250: flat_atomic_sub_clamp_u32 v1, v2, s[2:3] offset:64 scale_offset th:TH_ATOMIC_RETURN ; encoding: [0x02,0xc0,0x0d,0xec,0x00,0x00,0x11,0x01,0x01,0x40,0x00,0x00] +// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: operands are not valid for this GPU or mode + flat_atomic_sub_clamp_u32 v2, v3, s[2:3] offset:64 // GFX1250: flat_atomic_sub_clamp_u32 v2, v3, s[2:3] offset:64 ; encoding: [0x02,0xc0,0x0d,0xec,0x00,0x00,0x80,0x01,0x02,0x40,0x00,0x00] // GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: operands are not valid for this GPU or mode +flat_atomic_sub_u32 v1, v2, s[2:3] offset:64 scale_offset th:TH_ATOMIC_RETURN +// GFX1250: flat_atomic_sub_u32 v1, v2, s[2:3] offset:64 scale_offset th:TH_ATOMIC_RETURN ; encoding: [0x02,0x80,0x0d,0xec,0x00,0x00,0x11,0x01,0x01,0x40,0x00,0x00] +// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: operands are not valid for this GPU or mode + flat_atomic_sub_u32 v2, v3, s[2:3] offset:64 // GFX1250: flat_atomic_sub_u32 v2, v3, s[2:3] offset:64 ; encoding: [0x02,0x80,0x0d,0xec,0x00,0x00,0x80,0x01,0x02,0x40,0x00,0x00] // GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: operands are not valid for this GPU or mode @@ -189,6 +341,14 @@ flat_atomic_sub_u64 v2, v[2:3], s[2:3] offset:64 // GFX1250: flat_atomic_sub_u64 v2, v[2:3], s[2:3] offset:64 ; encoding: [0x02,0x00,0x11,0xec,0x00,0x00,0x00,0x01,0x02,0x40,0x00,0x00] // GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: operands are not valid for this GPU or mode +flat_atomic_sub_u64 v[0:1], v2, v[2:3], s[2:3] offset:64 scale_offset th:TH_ATOMIC_RETURN +// GFX1250: flat_atomic_sub_u64 v[0:1], v2, v[2:3], s[2:3] offset:64 scale_offset th:TH_ATOMIC_RETURN ; encoding: [0x02,0x00,0x11,0xec,0x00,0x00,0x11,0x01,0x02,0x40,0x00,0x00] +// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: not a valid operand. 
+ +flat_atomic_swap_b32 v0, v2, s[2:3] scale_offset th:TH_ATOMIC_RETURN +// GFX1250: flat_atomic_swap_b32 v0, v2, s[2:3] scale_offset th:TH_ATOMIC_RETURN ; encoding: [0x02,0xc0,0x0c,0xec,0x00,0x00,0x11,0x01,0x00,0x00,0x00,0x00] +// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: operands are not valid for this GPU or mode + flat_atomic_swap_b32 v2, v3, s[2:3] offset:64 // GFX1250: flat_atomic_swap_b32 v2, v3, s[2:3] offset:64 ; encoding: [0x02,0xc0,0x0c,0xec,0x00,0x00,0x80,0x01,0x02,0x40,0x00,0x00] // GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: operands are not valid for this GPU or mode @@ -197,6 +357,14 @@ flat_atomic_swap_b64 v2, v[2:3], s[2:3] offset:64 // GFX1250: flat_atomic_swap_b64 v2, v[2:3], s[2:3] offset:64 ; encoding: [0x02,0x40,0x10,0xec,0x00,0x00,0x00,0x01,0x02,0x40,0x00,0x00] // GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: operands are not valid for this GPU or mode +flat_atomic_swap_b64 v[0:1], v2, v[2:3], s[2:3] offset:64 scale_offset th:TH_ATOMIC_RETURN +// GFX1250: flat_atomic_swap_b64 v[0:1], v2, v[2:3], s[2:3] offset:64 scale_offset th:TH_ATOMIC_RETURN ; encoding: [0x02,0x40,0x10,0xec,0x00,0x00,0x11,0x01,0x02,0x40,0x00,0x00] +// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: not a valid operand. + +flat_atomic_xor_b32 v1, v2, s[2:3] offset:64 scale_offset th:TH_ATOMIC_RETURN +// GFX1250: flat_atomic_xor_b32 v1, v2, s[2:3] offset:64 scale_offset th:TH_ATOMIC_RETURN ; encoding: [0x02,0x80,0x0f,0xec,0x00,0x00,0x11,0x01,0x01,0x40,0x00,0x00] +// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: operands are not valid for this GPU or mode + flat_atomic_xor_b32 v2, v3, s[2:3] offset:64 // GFX1250: flat_atomic_xor_b32 v2, v3, s[2:3] offset:64 ; encoding: [0x02,0x80,0x0f,0xec,0x00,0x00,0x80,0x01,0x02,0x40,0x00,0x00] // GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: operands are not valid for this GPU or mode @@ -205,10 +373,114 @@ flat_atomic_xor_b64 v2, v[2:3], s[2:3] offset:64 // GFX1250: flat_atomic_xor_b64 v2, v[2:3], s[2:3] offset:64 ; encoding: [0x02,0xc0,0x12,0xec,0x00,0x00,0x00,0x01,0x02,0x40,0x00,0x00] // GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: operands are not valid for this GPU or mode +flat_atomic_xor_b64 v[0:1], v2, v[2:3], s[2:3] offset:64 scale_offset th:TH_ATOMIC_RETURN +// GFX1250: flat_atomic_xor_b64 v[0:1], v2, v[2:3], s[2:3] offset:64 scale_offset th:TH_ATOMIC_RETURN ; encoding: [0x02,0xc0,0x12,0xec,0x00,0x00,0x11,0x01,0x02,0x40,0x00,0x00] +// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: not a valid operand. 
+ +flat_atomic_pk_add_f16 v1, v2, s[2:3] offset:8000000 scale_offset th:TH_ATOMIC_RETURN +// GFX1250: flat_atomic_pk_add_f16 v1, v2, s[2:3] offset:8000000 scale_offset th:TH_ATOMIC_RETURN ; encoding: [0x02,0x40,0x16,0xec,0x00,0x00,0x11,0x01,0x01,0x00,0x12,0x7a] +// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: operands are not valid for this GPU or mode + flat_atomic_pk_add_f16 v2, v3, s[2:3] offset:64 // GFX1250: flat_atomic_pk_add_f16 v2, v3, s[2:3] offset:64 ; encoding: [0x02,0x40,0x16,0xec,0x00,0x00,0x80,0x01,0x02,0x40,0x00,0x00] // GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: operands are not valid for this GPU or mode +flat_atomic_pk_add_bf16 v1, v2, s[2:3] offset:8000000 scale_offset th:TH_ATOMIC_RETURN +// GFX1250: flat_atomic_pk_add_bf16 v1, v2, s[2:3] offset:8000000 scale_offset th:TH_ATOMIC_RETURN ; encoding: [0x02,0x80,0x16,0xec,0x00,0x00,0x11,0x01,0x01,0x00,0x12,0x7a] +// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: operands are not valid for this GPU or mode + flat_atomic_pk_add_bf16 v2, v3, s[2:3] offset:64 // GFX1250: flat_atomic_pk_add_bf16 v2, v3, s[2:3] offset:64 ; encoding: [0x02,0x80,0x16,0xec,0x00,0x00,0x80,0x01,0x02,0x40,0x00,0x00] // GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: operands are not valid for this GPU or mode + +flat_load_b128 v[2:5], v2, s[2:3] offset:64 scale_offset +// GFX1250: flat_load_b128 v[2:5], v2, s[2:3] offset:64 scale_offset ; encoding: [0x02,0xc0,0x05,0xec,0x02,0x00,0x01,0x00,0x02,0x40,0x00,0x00] +// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: not a valid operand. + +flat_load_b32 v1, v2, s[2:3] offset:64 scale_offset +// GFX1250: flat_load_b32 v1, v2, s[2:3] offset:64 scale_offset ; encoding: [0x02,0x00,0x05,0xec,0x01,0x00,0x01,0x00,0x02,0x40,0x00,0x00] +// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: not a valid operand. + +flat_load_b64 v[2:3], v2, s[2:3] offset:64 scale_offset +// GFX1250: flat_load_b64 v[2:3], v2, s[2:3] offset:64 scale_offset ; encoding: [0x02,0x40,0x05,0xec,0x02,0x00,0x01,0x00,0x02,0x40,0x00,0x00] +// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: not a valid operand. + +flat_load_b96 v[2:4], v2, s[2:3] offset:64 scale_offset +// GFX1250: flat_load_b96 v[2:4], v2, s[2:3] offset:64 scale_offset ; encoding: [0x02,0x80,0x05,0xec,0x02,0x00,0x01,0x00,0x02,0x40,0x00,0x00] +// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: not a valid operand. + +flat_load_d16_b16 v1, v2, s[2:3] offset:64 scale_offset +// GFX1250: flat_load_d16_b16 v1, v2, s[2:3] offset:64 scale_offset ; encoding: [0x02,0x00,0x08,0xec,0x01,0x00,0x01,0x00,0x02,0x40,0x00,0x00] +// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: not a valid operand. + +flat_load_d16_hi_b16 v1, v2, s[2:3] offset:64 scale_offset +// GFX1250: flat_load_d16_hi_b16 v1, v2, s[2:3] offset:64 scale_offset ; encoding: [0x02,0xc0,0x08,0xec,0x01,0x00,0x01,0x00,0x02,0x40,0x00,0x00] +// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: not a valid operand. + +flat_load_d16_hi_i8 v1, v2, s[2:3] offset:64 scale_offset +// GFX1250: flat_load_d16_hi_i8 v1, v2, s[2:3] offset:64 scale_offset ; encoding: [0x02,0x80,0x08,0xec,0x01,0x00,0x01,0x00,0x02,0x40,0x00,0x00] +// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: not a valid operand. + +flat_load_d16_hi_u8 v1, v2, s[2:3] offset:64 scale_offset +// GFX1250: flat_load_d16_hi_u8 v1, v2, s[2:3] offset:64 scale_offset ; encoding: [0x02,0x40,0x08,0xec,0x01,0x00,0x01,0x00,0x02,0x40,0x00,0x00] +// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: not a valid operand. 
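// Reader annotation, derived only from the byte patterns in the encodings
// above and not from ISA documentation: the immediate offset appears to live
// in the last three encoding bytes as a little-endian, sign-extended 24-bit
// field. offset:64 ends in 0x40,0x00,0x00, offset:-64 in 0xc0,0xff,0xff, and
// offset:8000000 (0x7a1200) in 0x00,0x12,0x7a.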
+ +flat_load_d16_i8 v1, v2, s[2:3] offset:64 scale_offset +// GFX1250: flat_load_d16_i8 v1, v2, s[2:3] offset:64 scale_offset ; encoding: [0x02,0xc0,0x07,0xec,0x01,0x00,0x01,0x00,0x02,0x40,0x00,0x00] +// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: not a valid operand. + +flat_load_d16_u8 v1, v2, s[2:3] offset:64 scale_offset +// GFX1250: flat_load_d16_u8 v1, v2, s[2:3] offset:64 scale_offset ; encoding: [0x02,0x80,0x07,0xec,0x01,0x00,0x01,0x00,0x02,0x40,0x00,0x00] +// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: not a valid operand. + +flat_load_i16 v1, v2, s[2:3] offset:64 scale_offset +// GFX1250: flat_load_i16 v1, v2, s[2:3] offset:64 scale_offset ; encoding: [0x02,0xc0,0x04,0xec,0x01,0x00,0x01,0x00,0x02,0x40,0x00,0x00] +// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: not a valid operand. + +flat_load_i8 v1, v2, s[2:3] offset:64 scale_offset +// GFX1250: flat_load_i8 v1, v2, s[2:3] offset:64 scale_offset ; encoding: [0x02,0x40,0x04,0xec,0x01,0x00,0x01,0x00,0x02,0x40,0x00,0x00] +// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: not a valid operand. + +flat_load_u16 v1, v2, s[2:3] offset:64 scale_offset +// GFX1250: flat_load_u16 v1, v2, s[2:3] offset:64 scale_offset ; encoding: [0x02,0x80,0x04,0xec,0x01,0x00,0x01,0x00,0x02,0x40,0x00,0x00] +// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: not a valid operand. + +flat_load_u8 v1, v2, s[2:3] offset:64 scale_offset +// GFX1250: flat_load_u8 v1, v2, s[2:3] offset:64 scale_offset ; encoding: [0x02,0x00,0x04,0xec,0x01,0x00,0x01,0x00,0x02,0x40,0x00,0x00] +// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: not a valid operand. + +flat_load_dword v1, v2, s[2:3] offset:64 scale_offset +// GFX1250: flat_load_b32 v1, v2, s[2:3] offset:64 scale_offset ; encoding: [0x02,0x00,0x05,0xec,0x01,0x00,0x01,0x00,0x02,0x40,0x00,0x00] +// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: not a valid operand. + +flat_store_b128 v2, v[2:5], s[2:3] offset:64 scale_offset +// GFX1250: flat_store_b128 v2, v[2:5], s[2:3] offset:64 scale_offset ; encoding: [0x02,0x40,0x07,0xec,0x00,0x00,0x01,0x01,0x02,0x40,0x00,0x00] +// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: not a valid operand. + +flat_store_b16 v2, v2, s[2:3] offset:64 scale_offset +// GFX1250: flat_store_b16 v2, v2, s[2:3] offset:64 scale_offset ; encoding: [0x02,0x40,0x06,0xec,0x00,0x00,0x01,0x01,0x02,0x40,0x00,0x00] +// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: not a valid operand. + +flat_store_b32 v2, v2, s[2:3] offset:64 scale_offset +// GFX1250: flat_store_b32 v2, v2, s[2:3] offset:64 scale_offset ; encoding: [0x02,0x80,0x06,0xec,0x00,0x00,0x01,0x01,0x02,0x40,0x00,0x00] +// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: not a valid operand. + +flat_store_b64 v2, v[2:3], s[2:3] offset:64 scale_offset +// GFX1250: flat_store_b64 v2, v[2:3], s[2:3] offset:64 scale_offset ; encoding: [0x02,0xc0,0x06,0xec,0x00,0x00,0x01,0x01,0x02,0x40,0x00,0x00] +// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: not a valid operand. + +flat_store_b8 v2, v2, s[2:3] offset:64 scale_offset +// GFX1250: flat_store_b8 v2, v2, s[2:3] offset:64 scale_offset ; encoding: [0x02,0x00,0x06,0xec,0x00,0x00,0x01,0x01,0x02,0x40,0x00,0x00] +// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: not a valid operand. + +flat_store_b96 v2, v[2:4], s[2:3] offset:64 scale_offset +// GFX1250: flat_store_b96 v2, v[2:4], s[2:3] offset:64 scale_offset ; encoding: [0x02,0x00,0x07,0xec,0x00,0x00,0x01,0x01,0x02,0x40,0x00,0x00] +// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: not a valid operand. 
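// Reader annotation: flat_load_dword above is the legacy mnemonic; the
// GFX1250 check confirms the assembler still accepts it as an alias and
// prints the canonical flat_load_b32 form with the identical encoding.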
+ +flat_store_d16_hi_b16 v2, v2, s[2:3] offset:64 scale_offset +// GFX1250: flat_store_d16_hi_b16 v2, v2, s[2:3] offset:64 scale_offset ; encoding: [0x02,0x40,0x09,0xec,0x00,0x00,0x01,0x01,0x02,0x40,0x00,0x00] +// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: not a valid operand. + +flat_store_d16_hi_b8 v2, v2, s[2:3] offset:64 scale_offset +// GFX1250: flat_store_d16_hi_b8 v2, v2, s[2:3] offset:64 scale_offset ; encoding: [0x02,0x00,0x09,0xec,0x00,0x00,0x01,0x01,0x02,0x40,0x00,0x00] +// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: not a valid operand. diff --git a/llvm/test/MC/AMDGPU/gfx1250_asm_vflat_err.s b/llvm/test/MC/AMDGPU/gfx1250_asm_vflat_err.s new file mode 100644 index 0000000..26d7ed3 --- /dev/null +++ b/llvm/test/MC/AMDGPU/gfx1250_asm_vflat_err.s @@ -0,0 +1,59 @@ +// RUN: not llvm-mc -triple=amdgcn -mcpu=gfx1250 -show-encoding %s 2>&1 | FileCheck --check-prefix=GFX1250-ERR --implicit-check-not=error: --strict-whitespace %s + +global_load_b96 v[1:3], v[0:1], off +// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: invalid register class: vgpr tuples must be 64 bit aligned + +flat_load_b32 v5, v[2:3] scale_offset +// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: scale_offset is not supported for this instruction +// GFX1250-ERR-NEXT:{{^}}flat_load_b32 v5, v[2:3] scale_offset +// GFX1250-ERR-NEXT:{{^}} ^ + +flat_load_b32 v5, v[2:3] offset:32 scale_offset +// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: scale_offset is not supported for this instruction +// GFX1250-ERR-NEXT:{{^}}flat_load_b32 v5, v[2:3] offset:32 scale_offset +// GFX1250-ERR-NEXT:{{^}} ^ + +flat_store_b32 v[2:3], v5 scale_offset +// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: scale_offset is not supported for this instruction +// GFX1250-ERR-NEXT:{{^}}flat_store_b32 v[2:3], v5 scale_offset +// GFX1250-ERR-NEXT:{{^}} ^ + +flat_atomic_add v[2:3], v2 scale_offset +// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: scale_offset is not supported for this instruction +// GFX1250-ERR-NEXT:{{^}}flat_atomic_add v[2:3], v2 scale_offset +// GFX1250-ERR-NEXT:{{^}} ^ + +global_load_b32 v5, v[2:3], off offset:32 scale_offset +// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: scale_offset is not supported for this instruction +// GFX1250-ERR-NEXT:{{^}}global_load_b32 v5, v[2:3], off offset:32 scale_offset +// GFX1250-ERR-NEXT:{{^}} ^ + +global_store_b32 v[2:3], v5, off offset:32 scale_offset +// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: scale_offset is not supported for this instruction +// GFX1250-ERR-NEXT:{{^}}global_store_b32 v[2:3], v5, off offset:32 scale_offset +// GFX1250-ERR-NEXT:{{^}} ^ + +global_atomic_add v[2:3], v2, off scale_offset +// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: scale_offset is not supported for this instruction +// GFX1250-ERR-NEXT:{{^}}global_atomic_add v[2:3], v2, off scale_offset +// GFX1250-ERR-NEXT:{{^}} ^ + +global_load_addtid_b32 v5, s[2:3] scale_offset +// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: scale_offset is not supported for this instruction +// GFX1250-ERR-NEXT:{{^}}global_load_addtid_b32 v5, s[2:3] scale_offset +// GFX1250-ERR-NEXT:{{^}} ^ + +global_store_addtid_b32 v5, s[2:3] scale_offset +// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: scale_offset is not supported for this instruction +// GFX1250-ERR-NEXT:{{^}}global_store_addtid_b32 v5, s[2:3] scale_offset +// GFX1250-ERR-NEXT:{{^}} ^ + +scratch_load_b32 v5, off, s1 scale_offset +// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: scale_offset is not supported for this instruction +// GFX1250-ERR-NEXT:{{^}}scratch_load_b32 v5, off, s1 
scale_offset +// GFX1250-ERR-NEXT:{{^}} ^ + +scratch_load_b32 v5, off, off offset:32 scale_offset +// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: scale_offset is not supported for this instruction +// GFX1250-ERR-NEXT:{{^}}scratch_load_b32 v5, off, off offset:32 scale_offset +// GFX1250-ERR-NEXT:{{^}} ^ diff --git a/llvm/test/MC/AMDGPU/gfx1250_asm_vop2_err.s b/llvm/test/MC/AMDGPU/gfx1250_asm_vop2_err.s index b68306d..f67ad88 100644 --- a/llvm/test/MC/AMDGPU/gfx1250_asm_vop2_err.s +++ b/llvm/test/MC/AMDGPU/gfx1250_asm_vop2_err.s @@ -1,5 +1,8 @@ // RUN: not llvm-mc -triple=amdgcn -mcpu=gfx1250 -show-encoding %s 2>&1 | FileCheck --check-prefix=GFX1250-ERR --implicit-check-not=error: --strict-whitespace %s +v_add_f64 v[1:2], v[1:2], v[1:2] +// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: invalid register class: vgpr tuples must be 64 bit aligned + v_fmaak_f32 v4, v2, v6, 3 row_share:1 // GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: not a valid operand. // GFX1250-ERR-NEXT:{{^}}v_fmaak_f32 v4, v2, v6, 3 row_share:1 diff --git a/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_smem.txt b/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_smem.txt index 4bd9ab4..92fa802 100644 --- a/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_smem.txt +++ b/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_smem.txt @@ -5,3 +5,15 @@ # GFX1250: s_load_b32 s4, s[2:3], 0xa nv ; encoding: [0x01,0x01,0x10,0xf4,0x0a,0x00,0x00,0xf8] 0x01,0x01,0x10,0xf4,0x0a,0x00,0x00,0xf8 + +# GFX1250: s_load_b32 s4, s[2:3], 0xa scale_offset ; encoding: [0x01,0x01,0x00,0xf4,0x0a,0x00,0x00,0xf9] +0x01,0x01,0x00,0xf4,0x0a,0x00,0x00,0xf9 + +# GFX1250: s_load_b32 s4, s[2:3], 0xa scale_offset nv ; encoding: [0x01,0x01,0x10,0xf4,0x0a,0x00,0x00,0xf9] +0x01,0x01,0x10,0xf4,0x0a,0x00,0x00,0xf9 + +# GFX1250: s_load_b32 s4, s[2:3], m0 offset:0x20 scale_offset ; encoding: [0x01,0x01,0x00,0xf4,0x20,0x00,0x00,0xfb] +0x01,0x01,0x00,0xf4,0x20,0x00,0x00,0xfb + +# GFX1250: s_load_b32 s4, s[2:3], s5 offset:0x20 scale_offset ; encoding: [0x01,0x01,0x00,0xf4,0x20,0x00,0x00,0x0b] +0x01,0x01,0x00,0xf4,0x20,0x00,0x00,0x0b diff --git a/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vflat.txt b/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vflat.txt index fcbb58b..3455f4c 100644 --- a/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vflat.txt +++ b/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vflat.txt @@ -1,104 +1,269 @@ # RUN: llvm-mc -triple=amdgcn -mcpu=gfx1250 -disassemble -show-encoding < %s | FileCheck -check-prefixes=GFX1250 %s +# GFX1250: flat_atomic_add_f32 v0, v1, v2, s[2:3] offset:8000000 scale_offset th:TH_ATOMIC_RETURN ; encoding: [0x02,0x80,0x15,0xec,0x00,0x00,0x11,0x01,0x01,0x00,0x12,0x7a] +0x02,0x80,0x15,0xec,0x00,0x00,0x11,0x01,0x01,0x00,0x12,0x7a + # GFX1250: flat_atomic_add_f32 v2, v3, s[2:3] offset:64 ; encoding: [0x02,0x80,0x15,0xec,0x00,0x00,0x80,0x01,0x02,0x40,0x00,0x00] 0x02,0x80,0x15,0xec,0x00,0x00,0x80,0x01,0x02,0x40,0x00,0x00 +# GFX1250: flat_atomic_add_u32 v0, v1, v2, s[2:3] offset:-64 scale_offset th:TH_ATOMIC_RETURN ; encoding: [0x02,0x40,0x0d,0xec,0x00,0x00,0x11,0x01,0x01,0xc0,0xff,0xff] +0x02,0x40,0x0d,0xec,0x00,0x00,0x11,0x01,0x01,0xc0,0xff,0xff + # GFX1250: flat_atomic_add_u32 v2, v3, s[2:3] offset:-64 ; encoding: [0x02,0x40,0x0d,0xec,0x00,0x00,0x80,0x01,0x02,0xc0,0xff,0xff] 0x02,0x40,0x0d,0xec,0x00,0x00,0x80,0x01,0x02,0xc0,0xff,0xff # GFX1250: flat_atomic_add_u64 v2, v[2:3], s[2:3] offset:64 ; encoding: [0x02,0xc0,0x10,0xec,0x00,0x00,0x00,0x01,0x02,0x40,0x00,0x00] 0x02,0xc0,0x10,0xec,0x00,0x00,0x00,0x01,0x02,0x40,0x00,0x00 +# GFX1250: 
flat_atomic_add_u64 v[0:1], v2, v[2:3], s[2:3] offset:-64 scale_offset th:TH_ATOMIC_RETURN ; encoding: [0x02,0xc0,0x10,0xec,0x00,0x00,0x11,0x01,0x02,0xc0,0xff,0xff] +0x02,0xc0,0x10,0xec,0x00,0x00,0x11,0x01,0x02,0xc0,0xff,0xff + +# GFX1250: flat_atomic_and_b32 v0, v1, v2, s[2:3] offset:64 scale_offset th:TH_ATOMIC_RETURN ; encoding: [0x02,0x00,0x0f,0xec,0x00,0x00,0x11,0x01,0x01,0x40,0x00,0x00] +0x02,0x00,0x0f,0xec,0x00,0x00,0x11,0x01,0x01,0x40,0x00,0x00 + # GFX1250: flat_atomic_and_b32 v2, v3, s[2:3] offset:64 ; encoding: [0x02,0x00,0x0f,0xec,0x00,0x00,0x80,0x01,0x02,0x40,0x00,0x00] 0x02,0x00,0x0f,0xec,0x00,0x00,0x80,0x01,0x02,0x40,0x00,0x00 # GFX1250: flat_atomic_and_b64 v2, v[2:3], s[2:3] offset:64 ; encoding: [0x02,0x40,0x12,0xec,0x00,0x00,0x00,0x01,0x02,0x40,0x00,0x00] 0x02,0x40,0x12,0xec,0x00,0x00,0x00,0x01,0x02,0x40,0x00,0x00 +# GFX1250: flat_atomic_and_b64 v[0:1], v2, v[2:3], s[2:3] offset:-64 scale_offset th:TH_ATOMIC_RETURN ; encoding: [0x02,0x40,0x12,0xec,0x00,0x00,0x11,0x01,0x02,0xc0,0xff,0xff] +0x02,0x40,0x12,0xec,0x00,0x00,0x11,0x01,0x02,0xc0,0xff,0xff + +# GFX1250: flat_atomic_cmpswap_b32 v0, v2, v[2:3], s[2:3] scale_offset th:TH_ATOMIC_RETURN ; encoding: [0x02,0x00,0x0d,0xec,0x00,0x00,0x11,0x01,0x02,0x00,0x00,0x00] +0x02,0x00,0x0d,0xec,0x00,0x00,0x11,0x01,0x02,0x00,0x00,0x00 + # GFX1250: flat_atomic_cmpswap_b32 v2, v[2:3], s[2:3] offset:64 ; encoding: [0x02,0x00,0x0d,0xec,0x00,0x00,0x00,0x01,0x02,0x40,0x00,0x00] 0x02,0x00,0x0d,0xec,0x00,0x00,0x00,0x01,0x02,0x40,0x00,0x00 # GFX1250: flat_atomic_cmpswap_b64 v2, v[2:5], s[2:3] ; encoding: [0x02,0x80,0x10,0xec,0x00,0x00,0x00,0x01,0x02,0x00,0x00,0x00] 0x02,0x80,0x10,0xec,0x00,0x00,0x00,0x01,0x02,0x00,0x00,0x00 +# GFX1250: flat_atomic_cmpswap_b64 v[0:1], v2, v[2:5], s[2:3] offset:64 scale_offset th:TH_ATOMIC_RETURN ; encoding: [0x02,0x80,0x10,0xec,0x00,0x00,0x11,0x01,0x02,0x40,0x00,0x00] +0x02,0x80,0x10,0xec,0x00,0x00,0x11,0x01,0x02,0x40,0x00,0x00 + +# GFX1250: flat_atomic_cond_sub_u32 v0, v1, v2, s[2:3] offset:64 scale_offset th:TH_ATOMIC_RETURN ; encoding: [0x02,0x00,0x14,0xec,0x00,0x00,0x11,0x01,0x01,0x40,0x00,0x00] +0x02,0x00,0x14,0xec,0x00,0x00,0x11,0x01,0x01,0x40,0x00,0x00 + # GFX1250: flat_atomic_cond_sub_u32 v2, v3, s[2:3] offset:64 ; encoding: [0x02,0x00,0x14,0xec,0x00,0x00,0x80,0x01,0x02,0x40,0x00,0x00] 0x02,0x00,0x14,0xec,0x00,0x00,0x80,0x01,0x02,0x40,0x00,0x00 +# GFX1250: flat_atomic_dec_u32 v0, v1, v2, s[2:3] offset:64 scale_offset th:TH_ATOMIC_RETURN ; encoding: [0x02,0x00,0x10,0xec,0x00,0x00,0x11,0x01,0x01,0x40,0x00,0x00] +0x02,0x00,0x10,0xec,0x00,0x00,0x11,0x01,0x01,0x40,0x00,0x00 + # GFX1250: flat_atomic_dec_u32 v2, v3, s[2:3] offset:64 ; encoding: [0x02,0x00,0x10,0xec,0x00,0x00,0x80,0x01,0x02,0x40,0x00,0x00] 0x02,0x00,0x10,0xec,0x00,0x00,0x80,0x01,0x02,0x40,0x00,0x00 # GFX1250: flat_atomic_dec_u64 v2, v[2:3], s[2:3] offset:64 ; encoding: [0x02,0x40,0x13,0xec,0x00,0x00,0x00,0x01,0x02,0x40,0x00,0x00] 0x02,0x40,0x13,0xec,0x00,0x00,0x00,0x01,0x02,0x40,0x00,0x00 +# GFX1250: flat_atomic_dec_u64 v[0:1], v2, v[2:3], s[2:3] offset:-64 scale_offset th:TH_ATOMIC_RETURN ; encoding: [0x02,0x40,0x13,0xec,0x00,0x00,0x11,0x01,0x02,0xc0,0xff,0xff] +0x02,0x40,0x13,0xec,0x00,0x00,0x11,0x01,0x02,0xc0,0xff,0xff + +# GFX1250: flat_atomic_inc_u32 v0, v1, v2, s[2:3] offset:64 scale_offset th:TH_ATOMIC_RETURN ; encoding: [0x02,0xc0,0x0f,0xec,0x00,0x00,0x11,0x01,0x01,0x40,0x00,0x00] +0x02,0xc0,0x0f,0xec,0x00,0x00,0x11,0x01,0x01,0x40,0x00,0x00 + # GFX1250: flat_atomic_inc_u32 v2, v3, s[2:3] offset:64 ; encoding: 
[0x02,0xc0,0x0f,0xec,0x00,0x00,0x80,0x01,0x02,0x40,0x00,0x00] 0x02,0xc0,0x0f,0xec,0x00,0x00,0x80,0x01,0x02,0x40,0x00,0x00 # GFX1250: flat_atomic_inc_u64 v2, v[2:3], s[2:3] offset:64 ; encoding: [0x02,0x00,0x13,0xec,0x00,0x00,0x00,0x01,0x02,0x40,0x00,0x00] 0x02,0x00,0x13,0xec,0x00,0x00,0x00,0x01,0x02,0x40,0x00,0x00 +# GFX1250: flat_atomic_inc_u64 v[0:1], v2, v[2:3], s[2:3] offset:64 scale_offset th:TH_ATOMIC_RETURN ; encoding: [0x02,0x00,0x13,0xec,0x00,0x00,0x11,0x01,0x02,0x40,0x00,0x00] +0x02,0x00,0x13,0xec,0x00,0x00,0x11,0x01,0x02,0x40,0x00,0x00 + +# GFX1250: flat_atomic_max_i32 v0, v1, v2, s[2:3] offset:64 scale_offset th:TH_ATOMIC_RETURN ; encoding: [0x02,0x80,0x0e,0xec,0x00,0x00,0x11,0x01,0x01,0x40,0x00,0x00] +0x02,0x80,0x0e,0xec,0x00,0x00,0x11,0x01,0x01,0x40,0x00,0x00 + # GFX1250: flat_atomic_max_i32 v2, v3, s[2:3] offset:64 ; encoding: [0x02,0x80,0x0e,0xec,0x00,0x00,0x80,0x01,0x02,0x40,0x00,0x00] 0x02,0x80,0x0e,0xec,0x00,0x00,0x80,0x01,0x02,0x40,0x00,0x00 # GFX1250: flat_atomic_max_i64 v2, v[2:3], s[2:3] offset:64 ; encoding: [0x02,0xc0,0x11,0xec,0x00,0x00,0x00,0x01,0x02,0x40,0x00,0x00] 0x02,0xc0,0x11,0xec,0x00,0x00,0x00,0x01,0x02,0x40,0x00,0x00 +# GFX1250: flat_atomic_max_i64 v[0:1], v2, v[2:3], s[2:3] offset:64 scale_offset th:TH_ATOMIC_RETURN ; encoding: [0x02,0xc0,0x11,0xec,0x00,0x00,0x11,0x01,0x02,0x40,0x00,0x00] +0x02,0xc0,0x11,0xec,0x00,0x00,0x11,0x01,0x02,0x40,0x00,0x00 + +# GFX1250: flat_atomic_max_num_f32 v0, v1, v2, s[2:3] offset:64 scale_offset th:TH_ATOMIC_RETURN ; encoding: [0x02,0x80,0x14,0xec,0x00,0x00,0x11,0x01,0x01,0x40,0x00,0x00] +0x02,0x80,0x14,0xec,0x00,0x00,0x11,0x01,0x01,0x40,0x00,0x00 + # GFX1250: flat_atomic_max_num_f32 v2, v3, s[2:3] offset:64 ; encoding: [0x02,0x80,0x14,0xec,0x00,0x00,0x80,0x01,0x02,0x40,0x00,0x00] 0x02,0x80,0x14,0xec,0x00,0x00,0x80,0x01,0x02,0x40,0x00,0x00 +# GFX1250: flat_atomic_max_u32 v0, v1, v2, s[2:3] offset:64 scale_offset th:TH_ATOMIC_RETURN ; encoding: [0x02,0xc0,0x0e,0xec,0x00,0x00,0x11,0x01,0x01,0x40,0x00,0x00] +0x02,0xc0,0x0e,0xec,0x00,0x00,0x11,0x01,0x01,0x40,0x00,0x00 + # GFX1250: flat_atomic_max_u32 v2, v3, s[2:3] offset:64 ; encoding: [0x02,0xc0,0x0e,0xec,0x00,0x00,0x80,0x01,0x02,0x40,0x00,0x00] 0x02,0xc0,0x0e,0xec,0x00,0x00,0x80,0x01,0x02,0x40,0x00,0x00 # GFX1250: flat_atomic_max_u64 v2, v[2:3], s[2:3] offset:64 ; encoding: [0x02,0x00,0x12,0xec,0x00,0x00,0x00,0x01,0x02,0x40,0x00,0x00] 0x02,0x00,0x12,0xec,0x00,0x00,0x00,0x01,0x02,0x40,0x00,0x00 +# GFX1250: flat_atomic_max_u64 v[0:1], v2, v[2:3], s[2:3] offset:64 scale_offset th:TH_ATOMIC_RETURN ; encoding: [0x02,0x00,0x12,0xec,0x00,0x00,0x11,0x01,0x02,0x40,0x00,0x00] +0x02,0x00,0x12,0xec,0x00,0x00,0x11,0x01,0x02,0x40,0x00,0x00 + +# GFX1250: flat_atomic_min_i32 v0, v1, v2, s[2:3] offset:64 scale_offset th:TH_ATOMIC_RETURN ; encoding: [0x02,0x00,0x0e,0xec,0x00,0x00,0x11,0x01,0x01,0x40,0x00,0x00] +0x02,0x00,0x0e,0xec,0x00,0x00,0x11,0x01,0x01,0x40,0x00,0x00 + # GFX1250: flat_atomic_min_i32 v2, v3, s[2:3] offset:64 ; encoding: [0x02,0x00,0x0e,0xec,0x00,0x00,0x80,0x01,0x02,0x40,0x00,0x00] 0x02,0x00,0x0e,0xec,0x00,0x00,0x80,0x01,0x02,0x40,0x00,0x00 # GFX1250: flat_atomic_min_i64 v2, v[2:3], s[2:3] offset:64 ; encoding: [0x02,0x40,0x11,0xec,0x00,0x00,0x00,0x01,0x02,0x40,0x00,0x00] 0x02,0x40,0x11,0xec,0x00,0x00,0x00,0x01,0x02,0x40,0x00,0x00 +# GFX1250: flat_atomic_min_i64 v[0:1], v2, v[2:3], s[2:3] offset:64 scale_offset th:TH_ATOMIC_RETURN ; encoding: [0x02,0x40,0x11,0xec,0x00,0x00,0x11,0x01,0x02,0x40,0x00,0x00] +0x02,0x40,0x11,0xec,0x00,0x00,0x11,0x01,0x02,0x40,0x00,0x00 + +# 
GFX1250: flat_atomic_min_num_f32 v0, v1, v2, s[2:3] offset:64 scale_offset th:TH_ATOMIC_RETURN ; encoding: [0x02,0x40,0x14,0xec,0x00,0x00,0x11,0x01,0x01,0x40,0x00,0x00] +0x02,0x40,0x14,0xec,0x00,0x00,0x11,0x01,0x01,0x40,0x00,0x00 + # GFX1250: flat_atomic_min_num_f32 v2, v3, s[2:3] offset:64 ; encoding: [0x02,0x40,0x14,0xec,0x00,0x00,0x80,0x01,0x02,0x40,0x00,0x00] 0x02,0x40,0x14,0xec,0x00,0x00,0x80,0x01,0x02,0x40,0x00,0x00 +# GFX1250: flat_atomic_min_u32 v0, v1, v2, s[2:3] offset:64 scale_offset th:TH_ATOMIC_RETURN ; encoding: [0x02,0x40,0x0e,0xec,0x00,0x00,0x11,0x01,0x01,0x40,0x00,0x00] +0x02,0x40,0x0e,0xec,0x00,0x00,0x11,0x01,0x01,0x40,0x00,0x00 + # GFX1250: flat_atomic_min_u32 v2, v3, s[2:3] offset:64 ; encoding: [0x02,0x40,0x0e,0xec,0x00,0x00,0x80,0x01,0x02,0x40,0x00,0x00] 0x02,0x40,0x0e,0xec,0x00,0x00,0x80,0x01,0x02,0x40,0x00,0x00 # GFX1250: flat_atomic_min_u64 v2, v[2:3], s[2:3] offset:64 ; encoding: [0x02,0x80,0x11,0xec,0x00,0x00,0x00,0x01,0x02,0x40,0x00,0x00] 0x02,0x80,0x11,0xec,0x00,0x00,0x00,0x01,0x02,0x40,0x00,0x00 +# GFX1250: flat_atomic_min_u64 v[0:1], v2, v[2:3], s[2:3] offset:64 scale_offset th:TH_ATOMIC_RETURN ; encoding: [0x02,0x80,0x11,0xec,0x00,0x00,0x11,0x01,0x02,0x40,0x00,0x00] +0x02,0x80,0x11,0xec,0x00,0x00,0x11,0x01,0x02,0x40,0x00,0x00 + +# GFX1250: flat_atomic_or_b32 v0, v1, v2, s[2:3] offset:64 scale_offset th:TH_ATOMIC_RETURN ; encoding: [0x02,0x40,0x0f,0xec,0x00,0x00,0x11,0x01,0x01,0x40,0x00,0x00] +0x02,0x40,0x0f,0xec,0x00,0x00,0x11,0x01,0x01,0x40,0x00,0x00 + # GFX1250: flat_atomic_or_b32 v2, v3, s[2:3] offset:64 ; encoding: [0x02,0x40,0x0f,0xec,0x00,0x00,0x80,0x01,0x02,0x40,0x00,0x00] 0x02,0x40,0x0f,0xec,0x00,0x00,0x80,0x01,0x02,0x40,0x00,0x00 # GFX1250: flat_atomic_or_b64 v2, v[2:3], s[2:3] offset:64 ; encoding: [0x02,0x80,0x12,0xec,0x00,0x00,0x00,0x01,0x02,0x40,0x00,0x00] 0x02,0x80,0x12,0xec,0x00,0x00,0x00,0x01,0x02,0x40,0x00,0x00 +# GFX1250: flat_atomic_or_b64 v[0:1], v2, v[2:3], s[2:3] offset:64 scale_offset th:TH_ATOMIC_RETURN ; encoding: [0x02,0x80,0x12,0xec,0x00,0x00,0x11,0x01,0x02,0x40,0x00,0x00] +0x02,0x80,0x12,0xec,0x00,0x00,0x11,0x01,0x02,0x40,0x00,0x00 + +# GFX1250: flat_atomic_pk_add_bf16 v0, v1, v2, s[2:3] offset:8000000 scale_offset th:TH_ATOMIC_RETURN ; encoding: [0x02,0x80,0x16,0xec,0x00,0x00,0x11,0x01,0x01,0x00,0x12,0x7a] +0x02,0x80,0x16,0xec,0x00,0x00,0x11,0x01,0x01,0x00,0x12,0x7a + # GFX1250: flat_atomic_pk_add_bf16 v2, v3, s[2:3] offset:64 ; encoding: [0x02,0x80,0x16,0xec,0x00,0x00,0x80,0x01,0x02,0x40,0x00,0x00] 0x02,0x80,0x16,0xec,0x00,0x00,0x80,0x01,0x02,0x40,0x00,0x00 +# GFX1250: flat_atomic_pk_add_f16 v0, v1, v2, s[2:3] offset:8000000 scale_offset th:TH_ATOMIC_RETURN ; encoding: [0x02,0x40,0x16,0xec,0x00,0x00,0x11,0x01,0x01,0x00,0x12,0x7a] +0x02,0x40,0x16,0xec,0x00,0x00,0x11,0x01,0x01,0x00,0x12,0x7a + # GFX1250: flat_atomic_pk_add_f16 v2, v3, s[2:3] offset:64 ; encoding: [0x02,0x40,0x16,0xec,0x00,0x00,0x80,0x01,0x02,0x40,0x00,0x00] 0x02,0x40,0x16,0xec,0x00,0x00,0x80,0x01,0x02,0x40,0x00,0x00 +# GFX1250: flat_atomic_sub_clamp_u32 v0, v1, v2, s[2:3] offset:64 scale_offset th:TH_ATOMIC_RETURN ; encoding: [0x02,0xc0,0x0d,0xec,0x00,0x00,0x11,0x01,0x01,0x40,0x00,0x00] +0x02,0xc0,0x0d,0xec,0x00,0x00,0x11,0x01,0x01,0x40,0x00,0x00 + # GFX1250: flat_atomic_sub_clamp_u32 v2, v3, s[2:3] offset:64 ; encoding: [0x02,0xc0,0x0d,0xec,0x00,0x00,0x80,0x01,0x02,0x40,0x00,0x00] 0x02,0xc0,0x0d,0xec,0x00,0x00,0x80,0x01,0x02,0x40,0x00,0x00 +# GFX1250: flat_atomic_sub_u32 v0, v1, v2, s[2:3] offset:64 scale_offset th:TH_ATOMIC_RETURN ; encoding: 
[0x02,0x80,0x0d,0xec,0x00,0x00,0x11,0x01,0x01,0x40,0x00,0x00] +0x02,0x80,0x0d,0xec,0x00,0x00,0x11,0x01,0x01,0x40,0x00,0x00 + # GFX1250: flat_atomic_sub_u32 v2, v3, s[2:3] offset:64 ; encoding: [0x02,0x80,0x0d,0xec,0x00,0x00,0x80,0x01,0x02,0x40,0x00,0x00] 0x02,0x80,0x0d,0xec,0x00,0x00,0x80,0x01,0x02,0x40,0x00,0x00 # GFX1250: flat_atomic_sub_u64 v2, v[2:3], s[2:3] offset:64 ; encoding: [0x02,0x00,0x11,0xec,0x00,0x00,0x00,0x01,0x02,0x40,0x00,0x00] 0x02,0x00,0x11,0xec,0x00,0x00,0x00,0x01,0x02,0x40,0x00,0x00 +# GFX1250: flat_atomic_sub_u64 v[0:1], v2, v[2:3], s[2:3] offset:64 scale_offset th:TH_ATOMIC_RETURN ; encoding: [0x02,0x00,0x11,0xec,0x00,0x00,0x11,0x01,0x02,0x40,0x00,0x00] +0x02,0x00,0x11,0xec,0x00,0x00,0x11,0x01,0x02,0x40,0x00,0x00 + +# GFX1250: flat_atomic_swap_b32 v0, v0, v2, s[2:3] scale_offset th:TH_ATOMIC_RETURN ; encoding: [0x02,0xc0,0x0c,0xec,0x00,0x00,0x11,0x01,0x00,0x00,0x00,0x00] +0x02,0xc0,0x0c,0xec,0x00,0x00,0x11,0x01,0x00,0x00,0x00,0x00 + # GFX1250: flat_atomic_swap_b32 v2, v3, s[2:3] offset:64 ; encoding: [0x02,0xc0,0x0c,0xec,0x00,0x00,0x80,0x01,0x02,0x40,0x00,0x00] 0x02,0xc0,0x0c,0xec,0x00,0x00,0x80,0x01,0x02,0x40,0x00,0x00 # GFX1250: flat_atomic_swap_b64 v2, v[2:3], s[2:3] offset:64 ; encoding: [0x02,0x40,0x10,0xec,0x00,0x00,0x00,0x01,0x02,0x40,0x00,0x00] 0x02,0x40,0x10,0xec,0x00,0x00,0x00,0x01,0x02,0x40,0x00,0x00 +# GFX1250: flat_atomic_swap_b64 v[0:1], v2, v[2:3], s[2:3] offset:64 scale_offset th:TH_ATOMIC_RETURN ; encoding: [0x02,0x40,0x10,0xec,0x00,0x00,0x11,0x01,0x02,0x40,0x00,0x00] +0x02,0x40,0x10,0xec,0x00,0x00,0x11,0x01,0x02,0x40,0x00,0x00 + +# GFX1250: flat_atomic_xor_b32 v0, v1, v2, s[2:3] offset:64 scale_offset th:TH_ATOMIC_RETURN ; encoding: [0x02,0x80,0x0f,0xec,0x00,0x00,0x11,0x01,0x01,0x40,0x00,0x00] +0x02,0x80,0x0f,0xec,0x00,0x00,0x11,0x01,0x01,0x40,0x00,0x00 + # GFX1250: flat_atomic_xor_b32 v2, v3, s[2:3] offset:64 ; encoding: [0x02,0x80,0x0f,0xec,0x00,0x00,0x80,0x01,0x02,0x40,0x00,0x00] 0x02,0x80,0x0f,0xec,0x00,0x00,0x80,0x01,0x02,0x40,0x00,0x00 # GFX1250: flat_atomic_xor_b64 v2, v[2:3], s[2:3] offset:64 ; encoding: [0x02,0xc0,0x12,0xec,0x00,0x00,0x00,0x01,0x02,0x40,0x00,0x00] 0x02,0xc0,0x12,0xec,0x00,0x00,0x00,0x01,0x02,0x40,0x00,0x00 +# GFX1250: flat_atomic_xor_b64 v[0:1], v2, v[2:3], s[2:3] offset:64 scale_offset th:TH_ATOMIC_RETURN ; encoding: [0x02,0xc0,0x12,0xec,0x00,0x00,0x11,0x01,0x02,0x40,0x00,0x00] +0x02,0xc0,0x12,0xec,0x00,0x00,0x11,0x01,0x02,0x40,0x00,0x00 + +# GFX1250: flat_load_b128 v[2:5], v2, s[2:3] offset:64 scale_offset ; encoding: [0x02,0xc0,0x05,0xec,0x02,0x00,0x01,0x00,0x02,0x40,0x00,0x00] +0x02,0xc0,0x05,0xec,0x02,0x00,0x01,0x00,0x02,0x40,0x00,0x00 + +# GFX1250: flat_load_b32 v1, v2, s[2:3] offset:64 scale_offset ; encoding: [0x02,0x00,0x05,0xec,0x01,0x00,0x01,0x00,0x02,0x40,0x00,0x00] +0x02,0x00,0x05,0xec,0x01,0x00,0x01,0x00,0x02,0x40,0x00,0x00 + +# GFX1250: flat_load_b64 v[2:3], v2, s[2:3] offset:64 scale_offset ; encoding: [0x02,0x40,0x05,0xec,0x02,0x00,0x01,0x00,0x02,0x40,0x00,0x00] +0x02,0x40,0x05,0xec,0x02,0x00,0x01,0x00,0x02,0x40,0x00,0x00 + +# GFX1250: flat_load_b96 v[2:4], v2, s[2:3] offset:64 scale_offset ; encoding: [0x02,0x80,0x05,0xec,0x02,0x00,0x01,0x00,0x02,0x40,0x00,0x00] +0x02,0x80,0x05,0xec,0x02,0x00,0x01,0x00,0x02,0x40,0x00,0x00 + +# GFX1250: flat_load_d16_b16 v1, v2, s[2:3] offset:64 scale_offset ; encoding: [0x02,0x00,0x08,0xec,0x01,0x00,0x01,0x00,0x02,0x40,0x00,0x00] +0x02,0x00,0x08,0xec,0x01,0x00,0x01,0x00,0x02,0x40,0x00,0x00 + +# GFX1250: flat_load_d16_hi_b16 v1, v2, s[2:3] offset:64 scale_offset ; encoding: 
[0x02,0xc0,0x08,0xec,0x01,0x00,0x01,0x00,0x02,0x40,0x00,0x00] +0x02,0xc0,0x08,0xec,0x01,0x00,0x01,0x00,0x02,0x40,0x00,0x00 + +# GFX1250: flat_load_d16_hi_i8 v1, v2, s[2:3] offset:64 scale_offset ; encoding: [0x02,0x80,0x08,0xec,0x01,0x00,0x01,0x00,0x02,0x40,0x00,0x00] +0x02,0x80,0x08,0xec,0x01,0x00,0x01,0x00,0x02,0x40,0x00,0x00 + +# GFX1250: flat_load_d16_hi_u8 v1, v2, s[2:3] offset:64 scale_offset ; encoding: [0x02,0x40,0x08,0xec,0x01,0x00,0x01,0x00,0x02,0x40,0x00,0x00] +0x02,0x40,0x08,0xec,0x01,0x00,0x01,0x00,0x02,0x40,0x00,0x00 + +# GFX1250: flat_load_d16_i8 v1, v2, s[2:3] offset:64 scale_offset ; encoding: [0x02,0xc0,0x07,0xec,0x01,0x00,0x01,0x00,0x02,0x40,0x00,0x00] +0x02,0xc0,0x07,0xec,0x01,0x00,0x01,0x00,0x02,0x40,0x00,0x00 + +# GFX1250: flat_load_d16_u8 v1, v2, s[2:3] offset:64 scale_offset ; encoding: [0x02,0x80,0x07,0xec,0x01,0x00,0x01,0x00,0x02,0x40,0x00,0x00] +0x02,0x80,0x07,0xec,0x01,0x00,0x01,0x00,0x02,0x40,0x00,0x00 + +# GFX1250: flat_load_i16 v1, v2, s[2:3] offset:64 scale_offset ; encoding: [0x02,0xc0,0x04,0xec,0x01,0x00,0x01,0x00,0x02,0x40,0x00,0x00] +0x02,0xc0,0x04,0xec,0x01,0x00,0x01,0x00,0x02,0x40,0x00,0x00 + +# GFX1250: flat_load_i8 v1, v2, s[2:3] offset:64 scale_offset ; encoding: [0x02,0x40,0x04,0xec,0x01,0x00,0x01,0x00,0x02,0x40,0x00,0x00] +0x02,0x40,0x04,0xec,0x01,0x00,0x01,0x00,0x02,0x40,0x00,0x00 + +# GFX1250: flat_load_u16 v1, v2, s[2:3] offset:64 scale_offset ; encoding: [0x02,0x80,0x04,0xec,0x01,0x00,0x01,0x00,0x02,0x40,0x00,0x00] +0x02,0x80,0x04,0xec,0x01,0x00,0x01,0x00,0x02,0x40,0x00,0x00 + +# GFX1250: flat_load_u8 v1, v2, s[2:3] offset:64 scale_offset ; encoding: [0x02,0x00,0x04,0xec,0x01,0x00,0x01,0x00,0x02,0x40,0x00,0x00] +0x02,0x00,0x04,0xec,0x01,0x00,0x01,0x00,0x02,0x40,0x00,0x00 + +# GFX1250: flat_store_b128 v2, v[2:5], s[2:3] offset:64 scale_offset ; encoding: [0x02,0x40,0x07,0xec,0x00,0x00,0x01,0x01,0x02,0x40,0x00,0x00] +0x02,0x40,0x07,0xec,0x00,0x00,0x01,0x01,0x02,0x40,0x00,0x00 + +# GFX1250: flat_store_b16 v2, v2, s[2:3] offset:64 scale_offset ; encoding: [0x02,0x40,0x06,0xec,0x00,0x00,0x01,0x01,0x02,0x40,0x00,0x00] +0x02,0x40,0x06,0xec,0x00,0x00,0x01,0x01,0x02,0x40,0x00,0x00 + +# GFX1250: flat_store_b32 v2, v2, s[2:3] offset:64 scale_offset ; encoding: [0x02,0x80,0x06,0xec,0x00,0x00,0x01,0x01,0x02,0x40,0x00,0x00] +0x02,0x80,0x06,0xec,0x00,0x00,0x01,0x01,0x02,0x40,0x00,0x00 + +# GFX1250: flat_store_b64 v2, v[2:3], s[2:3] offset:64 scale_offset ; encoding: [0x02,0xc0,0x06,0xec,0x00,0x00,0x01,0x01,0x02,0x40,0x00,0x00] +0x02,0xc0,0x06,0xec,0x00,0x00,0x01,0x01,0x02,0x40,0x00,0x00 + +# GFX1250: flat_store_b8 v2, v2, s[2:3] offset:64 scale_offset ; encoding: [0x02,0x00,0x06,0xec,0x00,0x00,0x01,0x01,0x02,0x40,0x00,0x00] +0x02,0x00,0x06,0xec,0x00,0x00,0x01,0x01,0x02,0x40,0x00,0x00 + +# GFX1250: flat_store_b96 v2, v[2:4], s[2:3] offset:64 scale_offset ; encoding: [0x02,0x00,0x07,0xec,0x00,0x00,0x01,0x01,0x02,0x40,0x00,0x00] +0x02,0x00,0x07,0xec,0x00,0x00,0x01,0x01,0x02,0x40,0x00,0x00 + +# GFX1250: flat_store_d16_hi_b16 v2, v2, s[2:3] offset:64 scale_offset ; encoding: [0x02,0x40,0x09,0xec,0x00,0x00,0x01,0x01,0x02,0x40,0x00,0x00] +0x02,0x40,0x09,0xec,0x00,0x00,0x01,0x01,0x02,0x40,0x00,0x00 + +# GFX1250: flat_store_d16_hi_b8 v2, v2, s[2:3] offset:64 scale_offset ; encoding: [0x02,0x00,0x09,0xec,0x00,0x00,0x01,0x01,0x02,0x40,0x00,0x00] +0x02,0x00,0x09,0xec,0x00,0x00,0x01,0x01,0x02,0x40,0x00,0x00 + # GFX1250: flat_atomic_add_f32 v1, v[0:1], v2 offset:-64 th:TH_ATOMIC_RETURN ; encoding: [0x7c,0x80,0x15,0xec,0x01,0x00,0x10,0x01,0x00,0xc0,0xff,0xff] 
0x7c,0x80,0x15,0xec,0x01,0x00,0x10,0x01,0x00,0xc0,0xff,0xff @@ -2856,6 +3021,30 @@ # GFX1250: scratch_load_b32 v5, v2, off nv ; encoding: [0xfc,0x00,0x05,0xed,0x05,0x00,0x02,0x00,0x02,0x00,0x00,0x00] 0xfc,0x00,0x05,0xed,0x05,0x00,0x02,0x00,0x02,0x00,0x00,0x00 +# GFX1250: global_load_b32 v5, v1, s[2:3] offset:32 scale_offset ; encoding: [0x02,0x00,0x05,0xee,0x05,0x00,0x01,0x00,0x01,0x20,0x00,0x00] +0x02,0x00,0x05,0xee,0x05,0x00,0x01,0x00,0x01,0x20,0x00,0x00 + +# GFX1250: global_store_b32 v5, v1, s[2:3] offset:32 scale_offset ; encoding: [0x02,0x80,0x06,0xee,0x00,0x00,0x81,0x00,0x05,0x20,0x00,0x00] +0x02,0x80,0x06,0xee,0x00,0x00,0x81,0x00,0x05,0x20,0x00,0x00 + +# GFX1250: global_atomic_add_u32 v2, v5, s[2:3] scale_offset ; encoding: [0x02,0x40,0x0d,0xee,0x00,0x00,0x81,0x02,0x02,0x00,0x00,0x00] +0x02,0x40,0x0d,0xee,0x00,0x00,0x81,0x02,0x02,0x00,0x00,0x00 + +# GFX1250: scratch_load_b32 v5, v2, off scale_offset ; encoding: [0x7c,0x00,0x05,0xed,0x05,0x00,0x03,0x00,0x02,0x00,0x00,0x00] +0x7c,0x00,0x05,0xed,0x05,0x00,0x03,0x00,0x02,0x00,0x00,0x00 + +# GFX1250: scratch_load_b32 v5, v2, off offset:32 scale_offset ; encoding: [0x7c,0x00,0x05,0xed,0x05,0x00,0x03,0x00,0x02,0x20,0x00,0x00] +0x7c,0x00,0x05,0xed,0x05,0x00,0x03,0x00,0x02,0x20,0x00,0x00 + +# GFX1250: scratch_load_b32 v5, v2, s1 offset:32 scale_offset ; encoding: [0x01,0x00,0x05,0xed,0x05,0x00,0x03,0x00,0x02,0x20,0x00,0x00] +0x01,0x00,0x05,0xed,0x05,0x00,0x03,0x00,0x02,0x20,0x00,0x00 + +# GFX1250: scratch_store_b32 v2, v5, off scale_offset ; encoding: [0x7c,0x80,0x06,0xed,0x00,0x00,0x83,0x02,0x02,0x00,0x00,0x00] +0x7c,0x80,0x06,0xed,0x00,0x00,0x83,0x02,0x02,0x00,0x00,0x00 + +# GFX1250: scratch_store_b32 v2, v5, s1 scale_offset ; encoding: [0x01,0x80,0x06,0xed,0x00,0x00,0x83,0x02,0x02,0x00,0x00,0x00] +0x01,0x80,0x06,0xed,0x00,0x00,0x83,0x02,0x02,0x00,0x00,0x00 + # GFX1250: tensor_save s[0:1] ; encoding: [0x00,0x80,0x1b,0xee,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00] 0x00,0x80,0x1b,0xee,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00 diff --git a/llvm/test/ThinLTO/X86/memprof-basic.ll b/llvm/test/ThinLTO/X86/memprof-basic.ll index 72d282f..c5eedb2 100644 --- a/llvm/test/ThinLTO/X86/memprof-basic.ll +++ b/llvm/test/ThinLTO/X86/memprof-basic.ll @@ -143,13 +143,14 @@ attributes #0 = { noinline optnone } !12 = !{i64 789, i64 300} !13 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus_14, file: !14, producer: "clang version 21.0.0git (git@github.com:llvm/llvm-project.git e391301e0e4d9183fe06e69602e87b0bc889aeda)", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, splitDebugInlining: false, nameTableKind: None) !14 = !DIFile(filename: "basic.cc", directory: "", checksumkind: CSK_MD5, checksum: "8636c46e81402013b9d54e8307d2f149") -!15 = distinct !DISubprogram(name: "bar", linkageName: "_Z3barv", scope: !14, file: !14, line: 1, type: !16, scopeLine: 1, flags: DIFlagPrototyped, spFlags: DISPFlagDefinition, unit: !13) +!15 = distinct !DISubprogram(name: "bar", linkageName: "_Z3barv", scope: !14, file: !14, line: 1, type: !16, scopeLine: 1, flags: DIFlagPrototyped, spFlags: DISPFlagDefinition, unit: !13, declaration: !22) !16 = !DISubroutineType(types: !17) !17 = !{!18} !18 = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: !19, size: 64) !19 = !DIBasicType(name: "char", size: 8, encoding: DW_ATE_signed_char) !20 = !{i32 7, !"Dwarf Version", i32 5} !21 = !{i32 2, !"Debug Info Version", i32 3} +!22 = !DISubprogram(name: "bar", linkageName: "_Z3barv", scope: !14, file: !14, line: 1, type: !16, scopeLine: 1, flags: DIFlagPrototyped, spFlags: 
DISPFlagOptimized) ; DUMP: CCG before cloning: ; DUMP: Callsite Context Graph: @@ -321,7 +322,8 @@ attributes #0 = { noinline optnone } ; IR: attributes #[[NOTCOLD]] = { "memprof"="notcold" } ; IR: attributes #[[COLD]] = { "memprof"="cold" } ;; Make sure the clone's linkageName was updated. -; IR: ![[SP]] = distinct !DISubprogram(name: "bar", linkageName: "_Z3barv.memprof.1" +; IR: ![[SP]] = distinct !DISubprogram(name: "bar", linkageName: "_Z3barv.memprof.1", {{.*}} declaration: ![[SP2:[0-9]+]]) +; IR: ![[SP2]] = !DISubprogram(name: "bar", linkageName: "_Z3barv.memprof.1" ; STATS: 1 memprof-context-disambiguation - Number of cold static allocations (possibly cloned) diff --git a/llvm/test/Transforms/AggressiveInstCombine/X86/store-merge.ll b/llvm/test/Transforms/AggressiveInstCombine/X86/store-merge.ll index 38a55e1..4ab8d18 100644 --- a/llvm/test/Transforms/AggressiveInstCombine/X86/store-merge.ll +++ b/llvm/test/Transforms/AggressiveInstCombine/X86/store-merge.ll @@ -792,6 +792,105 @@ define void @test_i32_tbaa(i32 %x, ptr %p) { ret void } +define void @test_multiple_parts_with_gap1(i32 %x, ptr %p) { +; CHECK-LABEL: define void @test_multiple_parts_with_gap1( +; CHECK-SAME: i32 [[X:%.*]], ptr [[P:%.*]]) { +; CHECK-NEXT: [[TMP1:%.*]] = trunc i32 [[X]] to i16 +; CHECK-NEXT: store i16 [[TMP1]], ptr [[P]], align 1 +; CHECK-NEXT: [[SHR_3:%.*]] = lshr i32 [[X]], 24 +; CHECK-NEXT: [[X_3:%.*]] = trunc i32 [[SHR_3]] to i8 +; CHECK-NEXT: [[GEP_3:%.*]] = getelementptr i8, ptr [[P]], i64 3 +; CHECK-NEXT: store i8 [[X_3]], ptr [[GEP_3]], align 1 +; CHECK-NEXT: ret void +; + %x.0 = trunc i32 %x to i8 + store i8 %x.0, ptr %p + %shr.1 = lshr i32 %x, 8 + %x.1 = trunc i32 %shr.1 to i8 + %gep.1 = getelementptr i8, ptr %p, i64 1 + store i8 %x.1, ptr %gep.1 + %shr.3 = lshr i32 %x, 24 + %x.3 = trunc i32 %shr.3 to i8 + %gep.3 = getelementptr i8, ptr %p, i64 3 + store i8 %x.3, ptr %gep.3 + ret void +} + +define void @test_multiple_parts_with_gap2(i32 %x, ptr %p) { +; CHECK-LABEL: define void @test_multiple_parts_with_gap2( +; CHECK-SAME: i32 [[X:%.*]], ptr [[P:%.*]]) { +; CHECK-NEXT: [[X_0:%.*]] = trunc i32 [[X]] to i8 +; CHECK-NEXT: store i8 [[X_0]], ptr [[P]], align 1 +; CHECK-NEXT: [[GEP_2:%.*]] = getelementptr i8, ptr [[P]], i64 1 +; CHECK-NEXT: [[TMP1:%.*]] = lshr i32 [[X]], 16 +; CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16 +; CHECK-NEXT: store i16 [[TMP2]], ptr [[GEP_2]], align 1 +; CHECK-NEXT: ret void +; + %x.0 = trunc i32 %x to i8 + store i8 %x.0, ptr %p + %shr.2 = lshr i32 %x, 16 + %x.2 = trunc i32 %shr.2 to i8 + %gep.2 = getelementptr i8, ptr %p, i64 1 + store i8 %x.2, ptr %gep.2 + %shr.3 = lshr i32 %x, 24 + %x.3 = trunc i32 %shr.3 to i8 + %gep.3 = getelementptr i8, ptr %p, i64 2 + store i8 %x.3, ptr %gep.3 + ret void +} + +define void @test_multiple_parts_with_gap3(i64 %x, ptr %p) { +; CHECK-LABEL: define void @test_multiple_parts_with_gap3( +; CHECK-SAME: i64 [[X:%.*]], ptr [[P:%.*]]) { +; CHECK-NEXT: [[TMP1:%.*]] = trunc i64 [[X]] to i16 +; CHECK-NEXT: store i16 [[TMP1]], ptr [[P]], align 1 +; CHECK-NEXT: [[GEP_3:%.*]] = getelementptr i8, ptr [[P]], i64 3 +; CHECK-NEXT: [[TMP2:%.*]] = lshr i64 [[X]], 24 +; CHECK-NEXT: [[TMP3:%.*]] = trunc i64 [[TMP2]] to i16 +; CHECK-NEXT: store i16 [[TMP3]], ptr [[GEP_3]], align 1 +; CHECK-NEXT: ret void +; + %x.0 = trunc i64 %x to i8 + store i8 %x.0, ptr %p + %shr.1 = lshr i64 %x, 8 + %x.1 = trunc i64 %shr.1 to i8 + %gep.1 = getelementptr i8, ptr %p, i64 1 + store i8 %x.1, ptr %gep.1 + %shr.3 = lshr i64 %x, 24 + %x.3 = trunc i64 %shr.3 to i8 + %gep.3 = 
getelementptr i8, ptr %p, i64 3 + store i8 %x.3, ptr %gep.3 + %shr.4 = lshr i64 %x, 32 + %x.4 = trunc i64 %shr.4 to i8 + %gep.4 = getelementptr i8, ptr %p, i64 4 + store i8 %x.4, ptr %gep.4 + ret void +} + +define void @test_store_same_parts_twice(i32 %x, ptr %p) { +; CHECK-LABEL: define void @test_store_same_parts_twice( +; CHECK-SAME: i32 [[X:%.*]], ptr [[P:%.*]]) { +; CHECK-NEXT: [[TMP1:%.*]] = trunc i32 [[X]] to i16 +; CHECK-NEXT: store i16 [[TMP1]], ptr [[P]], align 1 +; CHECK-NEXT: [[GEP_2:%.*]] = getelementptr i8, ptr [[P]], i64 2 +; CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[X]] to i16 +; CHECK-NEXT: store i16 [[TMP2]], ptr [[GEP_2]], align 1 +; CHECK-NEXT: ret void +; + %x.0 = trunc i32 %x to i8 + store i8 %x.0, ptr %p + %shr.1 = lshr i32 %x, 8 + %x.1 = trunc i32 %shr.1 to i8 + %gep.1 = getelementptr i8, ptr %p, i64 1 + store i8 %x.1, ptr %gep.1 + %gep.2 = getelementptr i8, ptr %p, i64 2 + store i8 %x.0, ptr %gep.2 + %gep.3 = getelementptr i8, ptr %p, i64 3 + store i8 %x.1, ptr %gep.3 + ret void +} + !0 = !{!1} !1 = !{!1, !2} !2 = !{!2} diff --git a/llvm/test/Transforms/GVNSink/lifetime.ll b/llvm/test/Transforms/GVNSink/lifetime.ll new file mode 100644 index 0000000..1a8a69b --- /dev/null +++ b/llvm/test/Transforms/GVNSink/lifetime.ll @@ -0,0 +1,77 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5 +; RUN: opt -S -passes=gvn-sink < %s | FileCheck %s + +; Make sure we do not sink lifetime markers if this would introduce a +; lifetime with non-alloca operand. + +define void @test_cant_sink(i1 %c) { +; CHECK-LABEL: define void @test_cant_sink( +; CHECK-SAME: i1 [[C:%.*]]) { +; CHECK-NEXT: [[A:%.*]] = alloca i8, align 1 +; CHECK-NEXT: [[B:%.*]] = alloca i8, align 1 +; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[A]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[B]]) +; CHECK-NEXT: br i1 [[C]], label %[[IF:.*]], label %[[ELSE:.*]] +; CHECK: [[IF]]: +; CHECK-NEXT: store i64 1, ptr [[A]], align 4 +; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[A]]) +; CHECK-NEXT: br label %[[JOIN:.*]] +; CHECK: [[ELSE]]: +; CHECK-NEXT: store i64 1, ptr [[B]], align 4 +; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[B]]) +; CHECK-NEXT: br label %[[JOIN]] +; CHECK: [[JOIN]]: +; CHECK-NEXT: ret void +; + %a = alloca i8 + %b = alloca i8 + call void @llvm.lifetime.start(i64 1, ptr %a) + call void @llvm.lifetime.start(i64 1, ptr %b) + br i1 %c, label %if, label %else + +if: + store i64 1, ptr %a + call void @llvm.lifetime.end(i64 1, ptr %a) + br label %join + +else: + store i64 1, ptr %b + call void @llvm.lifetime.end(i64 1, ptr %b) + br label %join + +join: + ret void +} + +define void @test_can_sink(i1 %c) { +; CHECK-LABEL: define void @test_can_sink( +; CHECK-SAME: i1 [[C:%.*]]) { +; CHECK-NEXT: [[A:%.*]] = alloca i8, align 1 +; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[A]]) +; CHECK-NEXT: br i1 [[C]], label %[[IF:.*]], label %[[ELSE:.*]] +; CHECK: [[IF]]: +; CHECK-NEXT: br label %[[JOIN:.*]] +; CHECK: [[ELSE]]: +; CHECK-NEXT: br label %[[JOIN]] +; CHECK: [[JOIN]]: +; CHECK-NEXT: store i64 1, ptr [[A]], align 4 +; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[A]]) +; CHECK-NEXT: ret void +; + %a = alloca i8 + call void @llvm.lifetime.start(i64 1, ptr %a) + br i1 %c, label %if, label %else + +if: + store i64 1, ptr %a + call void @llvm.lifetime.end(i64 1, ptr %a) + br label %join + +else: + store i64 1, ptr %a + call void @llvm.lifetime.end(i64 1, ptr %a) + br label %join + +join: + 
ret void +} diff --git a/llvm/test/Transforms/LoopSimplifyCFG/enter-through-indirectbr.ll b/llvm/test/Transforms/LoopSimplifyCFG/enter-through-indirectbr.ll new file mode 100644 index 0000000..dd524ab --- /dev/null +++ b/llvm/test/Transforms/LoopSimplifyCFG/enter-through-indirectbr.ll @@ -0,0 +1,28 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5 +; RUN: opt -S -enable-loop-simplifycfg-term-folding=true -passes='require<domtree>,loop(loop-simplifycfg)' -verify-loop-info -verify-dom-info -verify-loop-lcssa < %s | FileCheck %s + +define void @test(ptr %addr) { +; CHECK-LABEL: define void @test( +; CHECK-SAME: ptr [[ADDR:%.*]]) { +; CHECK-NEXT: indirectbr ptr [[ADDR]], [label %[[A:.*]], label %C] +; CHECK: [[A]]: +; CHECK-NEXT: br i1 true, label %[[B:.*]], label %[[C_LOOPEXIT:.*]] +; CHECK: [[B]]: +; CHECK-NEXT: br i1 true, label %[[A]], label %[[C_LOOPEXIT]] +; CHECK: [[C_LOOPEXIT]]: +; CHECK-NEXT: br label %[[C:.*]] +; CHECK: [[C]]: +; CHECK-NEXT: unreachable +; + + indirectbr ptr %addr, [label %A, label %C] + +A: + br i1 true, label %B, label %C + +B: + br i1 true, label %A, label %C + +C: + unreachable +} diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/check-prof-info.ll b/llvm/test/Transforms/LoopVectorize/AArch64/check-prof-info.ll index 1f61989..812bca9 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/check-prof-info.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/check-prof-info.ll @@ -46,27 +46,17 @@ define void @_Z3foov() { ; CHECK-V2-IC4-LABEL: define void @_Z3foov( ; CHECK-V2-IC4-SAME: ) #[[ATTR0:[0-9]+]] { ; CHECK-V2-IC4: [[VEC_EPILOG_VECTOR_BODY1:.*:]] -; CHECK-V2-IC4: br i1 [[MIN_ITERS_CHECK:%.*]], label %[[VEC_EPILOG_SCALAR_PH:.*]], label %[[VECTOR_MAIN_LOOP_ITER_CHECK:.*]], !prof [[PROF0:![0-9]+]] -; CHECK-V2-IC4: [[VECTOR_MAIN_LOOP_ITER_CHECK]]: -; CHECK-V2-IC4: br i1 false, label %[[VEC_EPILOG_PH:.*]], label %[[VECTOR_PH:.*]], !prof [[PROF0]] +; CHECK-V2-IC4: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]], !prof [[PROF0:![0-9]+]] ; CHECK-V2-IC4: [[VECTOR_PH]]: ; CHECK-V2-IC4: br label %[[VECTOR_BODY:.*]] ; CHECK-V2-IC4: [[VECTOR_BODY]]: -; CHECK-V2-IC4: br i1 [[TMP12:%.*]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !prof [[PROF1:![0-9]+]], !llvm.loop [[LOOP2:![0-9]+]] +; CHECK-V2-IC4: br i1 [[TMP10:%.*]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !prof [[PROF1:![0-9]+]], !llvm.loop [[LOOP2:![0-9]+]] ; CHECK-V2-IC4: [[MIDDLE_BLOCK]]: -; CHECK-V2-IC4: br i1 true, label %[[FOR_COND_CLEANUP:.*]], label %[[VEC_EPILOG_ITER_CHECK:.*]], !prof [[PROF5:![0-9]+]] -; CHECK-V2-IC4: [[VEC_EPILOG_ITER_CHECK]]: -; CHECK-V2-IC4: br i1 [[MIN_EPILOG_ITERS_CHECK:%.*]], label %[[VEC_EPILOG_SCALAR_PH]], label %[[VEC_EPILOG_PH]], !prof [[PROF6:![0-9]+]] -; CHECK-V2-IC4: [[VEC_EPILOG_PH]]: -; CHECK-V2-IC4: br label %[[VEC_EPILOG_VECTOR_BODY:.*]] -; CHECK-V2-IC4: [[VEC_EPILOG_VECTOR_BODY]]: -; CHECK-V2-IC4: br i1 [[TMP23:%.*]], label %[[VEC_EPILOG_MIDDLE_BLOCK:.*]], label %[[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] -; CHECK-V2-IC4: [[VEC_EPILOG_MIDDLE_BLOCK]]: -; CHECK-V2-IC4: br i1 [[CMP_N:%.*]], label %[[FOR_COND_CLEANUP]], label %[[VEC_EPILOG_SCALAR_PH]], !prof [[PROF8:![0-9]+]] -; CHECK-V2-IC4: [[VEC_EPILOG_SCALAR_PH]]: +; CHECK-V2-IC4: br i1 true, label %[[FOR_COND_CLEANUP:.*]], label %[[SCALAR_PH]], !prof [[PROF5:![0-9]+]] +; CHECK-V2-IC4: [[SCALAR_PH]]: ; CHECK-V2-IC4: br label %[[FOR_BODY:.*]] ; CHECK-V2-IC4: [[FOR_BODY]]: -; CHECK-V2-IC4: br i1 [[EXITCOND:%.*]], label 
%[[FOR_COND_CLEANUP]], label %[[FOR_BODY]], !prof [[PROF9:![0-9]+]], !llvm.loop [[LOOP10:![0-9]+]] +; CHECK-V2-IC4: br i1 [[EXITCOND:%.*]], label %[[FOR_COND_CLEANUP]], label %[[FOR_BODY]], !prof [[PROF6:![0-9]+]], !llvm.loop [[LOOP7:![0-9]+]] ; CHECK-V2-IC4: [[FOR_COND_CLEANUP]]: ; entry: @@ -111,9 +101,6 @@ for.cond.cleanup: ; preds = %for.body ; CHECK-V2-IC4: [[META3]] = !{!"llvm.loop.isvectorized", i32 1} ; CHECK-V2-IC4: [[META4]] = !{!"llvm.loop.unroll.runtime.disable"} ; CHECK-V2-IC4: [[PROF5]] = !{!"branch_weights", i32 1, i32 15} -; CHECK-V2-IC4: [[PROF6]] = !{!"branch_weights", i32 2, i32 0} -; CHECK-V2-IC4: [[LOOP7]] = distinct !{[[LOOP7]], [[META3]], [[META4]]} -; CHECK-V2-IC4: [[PROF8]] = !{!"branch_weights", i32 1, i32 1} -; CHECK-V2-IC4: [[PROF9]] = !{!"branch_weights", i32 0, i32 0} -; CHECK-V2-IC4: [[LOOP10]] = distinct !{[[LOOP10]], [[META4]], [[META3]]} +; CHECK-V2-IC4: [[PROF6]] = !{!"branch_weights", i32 0, i32 0} +; CHECK-V2-IC4: [[LOOP7]] = distinct !{[[LOOP7]], [[META4]], [[META3]]} ;. diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/interleave-with-gaps.ll b/llvm/test/Transforms/LoopVectorize/AArch64/interleave-with-gaps.ll new file mode 100644 index 0000000..298ef09 --- /dev/null +++ b/llvm/test/Transforms/LoopVectorize/AArch64/interleave-with-gaps.ll @@ -0,0 +1,399 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals none --version 5 +; RUN: opt -passes=loop-vectorize -S %s | FileCheck %s + +target triple = "aarch64-linux-gnu" + +; Original loop has trip count 16, but contains interleave groups with gaps, so +; the last iteration must execute in the scalar loop. Thus the vector loop can +; only execute up to 15 iterations. +define i64 @vector_loop_with_remaining_iterations(ptr %src, ptr noalias %dst, i32 %x) #0 { +; CHECK-LABEL: define i64 @vector_loop_with_remaining_iterations( +; CHECK-SAME: ptr [[SRC:%.*]], ptr noalias [[DST:%.*]], i32 [[X:%.*]]) #[[ATTR0:[0-9]+]] { +; CHECK-NEXT: [[ITER_CHECK:.*]]: +; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2 +; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ule i64 17, [[TMP1]] +; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[VEC_EPILOG_SCALAR_PH:.*]], label %[[VECTOR_MAIN_LOOP_ITER_CHECK:.*]] +; CHECK: [[VECTOR_MAIN_LOOP_ITER_CHECK]]: +; CHECK-NEXT: br i1 false, label %[[VEC_EPILOG_PH:.*]], label %[[VECTOR_PH:.*]] +; CHECK: [[VECTOR_PH]]: +; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <16 x i32> poison, i32 [[X]], i64 0 +; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <16 x i32> [[BROADCAST_SPLATINSERT]], <16 x i32> poison, <16 x i32> zeroinitializer +; CHECK-NEXT: [[TMP2:%.*]] = call <16 x i32> @llvm.abs.v16i32(<16 x i32> [[BROADCAST_SPLAT]], i1 false) +; CHECK-NEXT: [[TMP3:%.*]] = call <16 x i32> @llvm.abs.v16i32(<16 x i32> [[BROADCAST_SPLAT]], i1 false) +; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] +; CHECK: [[VECTOR_BODY]]: +; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <16 x i64> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP11:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP4:%.*]] = getelementptr { [4 x i8] }, ptr [[SRC]], i64 [[INDEX]], i32 0, i64 3 +; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <64 x i8>, ptr [[TMP4]], align 1 +; CHECK-NEXT: [[STRIDED_VEC:%.*]] = shufflevector <64 x i8> [[WIDE_VEC]], <64 x i8> poison, <16 x i32> <i32 0, i32 4, i32 8, i32 12, i32 16, i32 20, i32 24, i32 28, i32 32, 
i32 36, i32 40, i32 44, i32 48, i32 52, i32 56, i32 60> +; CHECK-NEXT: [[TMP5:%.*]] = zext <16 x i8> [[STRIDED_VEC]] to <16 x i32> +; CHECK-NEXT: [[TMP6:%.*]] = call <16 x i32> @llvm.umin.v16i32(<16 x i32> [[TMP2]], <16 x i32> [[TMP5]]) +; CHECK-NEXT: [[TMP7:%.*]] = call <16 x i32> @llvm.umin.v16i32(<16 x i32> [[TMP3]], <16 x i32> [[TMP6]]) +; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 [[INDEX]] +; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds i8, ptr [[TMP8]], i32 0 +; CHECK-NEXT: store <16 x i8> zeroinitializer, ptr [[TMP9]], align 1 +; CHECK-NEXT: [[TMP10:%.*]] = zext <16 x i32> [[TMP7]] to <16 x i64> +; CHECK-NEXT: [[TMP11]] = or <16 x i64> [[VEC_PHI]], [[TMP10]] +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 +; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], 16 +; CHECK-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; CHECK: [[MIDDLE_BLOCK]]: +; CHECK-NEXT: [[TMP13:%.*]] = call i64 @llvm.vector.reduce.or.v16i64(<16 x i64> [[TMP11]]) +; CHECK-NEXT: br label %[[VEC_EPILOG_ITER_CHECK:.*]] +; CHECK: [[VEC_EPILOG_ITER_CHECK]]: +; CHECK-NEXT: [[TMP14:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP15:%.*]] = mul nuw i64 [[TMP14]], 2 +; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ule i64 1, [[TMP15]] +; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label %[[VEC_EPILOG_SCALAR_PH]], label %[[VEC_EPILOG_PH]] +; CHECK: [[VEC_EPILOG_PH]]: +; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 16, %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[VECTOR_MAIN_LOOP_ITER_CHECK]] ] +; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i64 [ [[TMP13]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[VECTOR_MAIN_LOOP_ITER_CHECK]] ] +; CHECK-NEXT: [[TMP16:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP17:%.*]] = mul nuw i64 [[TMP16]], 2 +; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 17, [[TMP17]] +; CHECK-NEXT: [[TMP18:%.*]] = icmp eq i64 [[N_MOD_VF]], 0 +; CHECK-NEXT: [[TMP19:%.*]] = select i1 [[TMP18]], i64 [[TMP17]], i64 [[N_MOD_VF]] +; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 17, [[TMP19]] +; CHECK-NEXT: [[TMP20:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP21:%.*]] = mul nuw i64 [[TMP20]], 2 +; CHECK-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 2 x i32> poison, i32 [[X]], i64 0 +; CHECK-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <vscale x 2 x i32> [[BROADCAST_SPLATINSERT1]], <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer +; CHECK-NEXT: [[TMP22:%.*]] = insertelement <vscale x 2 x i64> zeroinitializer, i64 [[BC_MERGE_RDX]], i32 0 +; CHECK-NEXT: [[TMP23:%.*]] = call <vscale x 2 x i32> @llvm.abs.nxv2i32(<vscale x 2 x i32> [[BROADCAST_SPLAT2]], i1 false) +; CHECK-NEXT: [[TMP24:%.*]] = call <vscale x 2 x i32> @llvm.abs.nxv2i32(<vscale x 2 x i32> [[BROADCAST_SPLAT2]], i1 false) +; CHECK-NEXT: [[TMP25:%.*]] = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64() +; CHECK-NEXT: [[BROADCAST_SPLATINSERT3:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[BC_RESUME_VAL]], i64 0 +; CHECK-NEXT: [[BROADCAST_SPLAT4:%.*]] = shufflevector <vscale x 2 x i64> [[BROADCAST_SPLATINSERT3]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer +; CHECK-NEXT: [[TMP26:%.*]] = mul <vscale x 2 x i64> [[TMP25]], splat (i64 1) +; CHECK-NEXT: [[INDUCTION:%.*]] = add <vscale x 2 x i64> [[BROADCAST_SPLAT4]], [[TMP26]] +; CHECK-NEXT: [[TMP27:%.*]] = mul i64 1, [[TMP21]] +; CHECK-NEXT: [[BROADCAST_SPLATINSERT5:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[TMP27]], i64 0 +; CHECK-NEXT: 
[[BROADCAST_SPLAT6:%.*]] = shufflevector <vscale x 2 x i64> [[BROADCAST_SPLATINSERT5]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer +; CHECK-NEXT: br label %[[VEC_EPILOG_VECTOR_BODY:.*]] +; CHECK: [[VEC_EPILOG_VECTOR_BODY]]: +; CHECK-NEXT: [[INDEX7:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT9:%.*]], %[[VEC_EPILOG_VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 2 x i64> [ [[INDUCTION]], %[[VEC_EPILOG_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VEC_EPILOG_VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_PHI8:%.*]] = phi <vscale x 2 x i64> [ [[TMP22]], %[[VEC_EPILOG_PH]] ], [ [[TMP35:%.*]], %[[VEC_EPILOG_VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP28:%.*]] = getelementptr { [4 x i8] }, ptr [[SRC]], <vscale x 2 x i64> [[VEC_IND]], i32 0, i64 3 +; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 2 x i8> @llvm.masked.gather.nxv2i8.nxv2p0(<vscale x 2 x ptr> [[TMP28]], i32 1, <vscale x 2 x i1> splat (i1 true), <vscale x 2 x i8> poison) +; CHECK-NEXT: [[TMP29:%.*]] = zext <vscale x 2 x i8> [[WIDE_MASKED_GATHER]] to <vscale x 2 x i32> +; CHECK-NEXT: [[TMP30:%.*]] = call <vscale x 2 x i32> @llvm.umin.nxv2i32(<vscale x 2 x i32> [[TMP23]], <vscale x 2 x i32> [[TMP29]]) +; CHECK-NEXT: [[TMP31:%.*]] = call <vscale x 2 x i32> @llvm.umin.nxv2i32(<vscale x 2 x i32> [[TMP24]], <vscale x 2 x i32> [[TMP30]]) +; CHECK-NEXT: [[TMP32:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 [[INDEX7]] +; CHECK-NEXT: [[TMP33:%.*]] = getelementptr inbounds i8, ptr [[TMP32]], i32 0 +; CHECK-NEXT: store <vscale x 2 x i8> zeroinitializer, ptr [[TMP33]], align 1 +; CHECK-NEXT: [[TMP34:%.*]] = zext <vscale x 2 x i32> [[TMP31]] to <vscale x 2 x i64> +; CHECK-NEXT: [[TMP35]] = or <vscale x 2 x i64> [[VEC_PHI8]], [[TMP34]] +; CHECK-NEXT: [[INDEX_NEXT9]] = add nuw i64 [[INDEX7]], [[TMP21]] +; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 2 x i64> [[VEC_IND]], [[BROADCAST_SPLAT6]] +; CHECK-NEXT: [[TMP36:%.*]] = icmp eq i64 [[INDEX_NEXT9]], [[N_VEC]] +; CHECK-NEXT: br i1 [[TMP36]], label %[[VEC_EPILOG_MIDDLE_BLOCK:.*]], label %[[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] +; CHECK: [[VEC_EPILOG_MIDDLE_BLOCK]]: +; CHECK-NEXT: [[TMP37:%.*]] = call i64 @llvm.vector.reduce.or.nxv2i64(<vscale x 2 x i64> [[TMP35]]) +; CHECK-NEXT: br label %[[VEC_EPILOG_SCALAR_PH]] +; CHECK: [[VEC_EPILOG_SCALAR_PH]]: +; CHECK-NEXT: [[BC_RESUME_VAL10:%.*]] = phi i64 [ [[N_VEC]], %[[VEC_EPILOG_MIDDLE_BLOCK]] ], [ 16, %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[ITER_CHECK]] ] +; CHECK-NEXT: [[BC_MERGE_RDX11:%.*]] = phi i64 [ [[TMP37]], %[[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[TMP13]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[ITER_CHECK]] ] +; CHECK-NEXT: br label %[[LOOP:.*]] +; CHECK: [[LOOP]]: +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL10]], %[[VEC_EPILOG_SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[RED:%.*]] = phi i64 [ [[BC_MERGE_RDX11]], %[[VEC_EPILOG_SCALAR_PH]] ], [ [[RED_NEXT:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[GEP_SRC_I_I:%.*]] = getelementptr { [4 x i8] }, ptr [[SRC]], i64 [[IV]], i32 0, i64 3 +; CHECK-NEXT: [[L:%.*]] = load i8, ptr [[GEP_SRC_I_I]], align 1 +; CHECK-NEXT: [[L_EXT:%.*]] = zext i8 [[L]] to i32 +; CHECK-NEXT: [[ABS_0:%.*]] = call i32 @llvm.abs.i32(i32 [[X]], i1 false) +; CHECK-NEXT: [[MIN_0:%.*]] = call i32 @llvm.umin.i32(i32 [[ABS_0]], i32 [[L_EXT]]) +; CHECK-NEXT: [[ABS_1:%.*]] = call i32 @llvm.abs.i32(i32 [[X]], i1 false) +; CHECK-NEXT: [[MIN_1:%.*]] = call i32 @llvm.umin.i32(i32 [[ABS_1]], i32 [[MIN_0]]) +; CHECK-NEXT: [[GEP_DST:%.*]] = getelementptr inbounds i8, ptr 
[[DST]], i64 [[IV]] +; CHECK-NEXT: store i8 0, ptr [[GEP_DST]], align 1 +; CHECK-NEXT: [[MIN_EXT:%.*]] = zext i32 [[MIN_1]] to i64 +; CHECK-NEXT: [[RED_NEXT]] = or i64 [[RED]], [[MIN_EXT]] +; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 +; CHECK-NEXT: [[EXITCOND_NOT_I_I:%.*]] = icmp eq i64 [[IV_NEXT]], 17 +; CHECK-NEXT: br i1 [[EXITCOND_NOT_I_I]], label %[[EXIT:.*]], label %[[LOOP]], !llvm.loop [[LOOP4:![0-9]+]] +; CHECK: [[EXIT]]: +; CHECK-NEXT: [[RED_NEXT_LCSSA:%.*]] = phi i64 [ [[RED_NEXT]], %[[LOOP]] ] +; CHECK-NEXT: ret i64 [[RED_NEXT_LCSSA]] +; +entry: + br label %loop + +loop: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ] + %red = phi i64 [ 0, %entry ], [ %red.next, %loop ] + %gep.src.i.i = getelementptr { [4 x i8] }, ptr %src, i64 %iv, i32 0, i64 3 + %l = load i8, ptr %gep.src.i.i, align 1 + %l.ext = zext i8 %l to i32 + %abs.0 = call i32 @llvm.abs.i32(i32 %x, i1 false) + %min.0 = call i32 @llvm.umin.i32(i32 %abs.0, i32 %l.ext) + %abs.1 = call i32 @llvm.abs.i32(i32 %x, i1 false) + %min.1 = call i32 @llvm.umin.i32(i32 %abs.1, i32 %min.0) + %gep.dst = getelementptr inbounds i8, ptr %dst, i64 %iv + store i8 0, ptr %gep.dst, align 1 + %min.ext = zext i32 %min.1 to i64 + %red.next = or i64 %red, %min.ext + %iv.next = add i64 %iv, 1 + %exitcond.not.i.i = icmp eq i64 %iv.next, 17 + br i1 %exitcond.not.i.i, label %exit, label %loop + +exit: + ret i64 %red.next +} + +; Original loop has trip count 17, but contains interleave groups with gaps, so +; the last iteration must execute in the scalar loop. Thus the vector loop can +; only execute up to 16 iterations. +define i64 @main_vector_loop_fixed_with_no_remaining_iterations(ptr %src, ptr noalias %dst, i32 %x) #0 { +; CHECK-LABEL: define i64 @main_vector_loop_fixed_with_no_remaining_iterations( +; CHECK-SAME: ptr [[SRC:%.*]], ptr noalias [[DST:%.*]], i32 [[X:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[ITER_CHECK:.*]]: +; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2 +; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ule i64 17, [[TMP1]] +; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[VEC_EPILOG_SCALAR_PH:.*]], label %[[VECTOR_MAIN_LOOP_ITER_CHECK:.*]] +; CHECK: [[VECTOR_MAIN_LOOP_ITER_CHECK]]: +; CHECK-NEXT: br i1 false, label %[[VEC_EPILOG_PH:.*]], label %[[VECTOR_PH:.*]] +; CHECK: [[VECTOR_PH]]: +; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <16 x i32> poison, i32 [[X]], i64 0 +; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <16 x i32> [[BROADCAST_SPLATINSERT]], <16 x i32> poison, <16 x i32> zeroinitializer +; CHECK-NEXT: [[TMP2:%.*]] = call <16 x i32> @llvm.abs.v16i32(<16 x i32> [[BROADCAST_SPLAT]], i1 false) +; CHECK-NEXT: [[TMP3:%.*]] = call <16 x i32> @llvm.abs.v16i32(<16 x i32> [[BROADCAST_SPLAT]], i1 false) +; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] +; CHECK: [[VECTOR_BODY]]: +; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <16 x i64> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP11:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP4:%.*]] = getelementptr { [4 x i8] }, ptr [[SRC]], i64 [[INDEX]], i32 0, i64 3 +; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <64 x i8>, ptr [[TMP4]], align 1 +; CHECK-NEXT: [[STRIDED_VEC:%.*]] = shufflevector <64 x i8> [[WIDE_VEC]], <64 x i8> poison, <16 x i32> <i32 0, i32 4, i32 8, i32 12, i32 16, i32 20, i32 24, i32 28, i32 32, i32 36, i32 40, i32 44, i32 48, i32 52, i32 56, i32 60> +; CHECK-NEXT: [[TMP5:%.*]] = zext <16 x i8> [[STRIDED_VEC]] to <16 x 
i32> +; CHECK-NEXT: [[TMP6:%.*]] = call <16 x i32> @llvm.umin.v16i32(<16 x i32> [[TMP2]], <16 x i32> [[TMP5]]) +; CHECK-NEXT: [[TMP7:%.*]] = call <16 x i32> @llvm.umin.v16i32(<16 x i32> [[TMP3]], <16 x i32> [[TMP6]]) +; CHECK-NEXT: [[TMP26:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 [[INDEX]] +; CHECK-NEXT: [[TMP27:%.*]] = getelementptr inbounds i8, ptr [[TMP26]], i32 0 +; CHECK-NEXT: store <16 x i8> zeroinitializer, ptr [[TMP27]], align 1 +; CHECK-NEXT: [[TMP10:%.*]] = zext <16 x i32> [[TMP7]] to <16 x i64> +; CHECK-NEXT: [[TMP11]] = or <16 x i64> [[VEC_PHI]], [[TMP10]] +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 +; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], 16 +; CHECK-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] +; CHECK: [[MIDDLE_BLOCK]]: +; CHECK-NEXT: [[TMP13:%.*]] = call i64 @llvm.vector.reduce.or.v16i64(<16 x i64> [[TMP11]]) +; CHECK-NEXT: br label %[[VEC_EPILOG_ITER_CHECK:.*]] +; CHECK: [[VEC_EPILOG_ITER_CHECK]]: +; CHECK-NEXT: [[TMP14:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP15:%.*]] = mul nuw i64 [[TMP14]], 2 +; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ule i64 1, [[TMP15]] +; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label %[[VEC_EPILOG_SCALAR_PH]], label %[[VEC_EPILOG_PH]] +; CHECK: [[VEC_EPILOG_PH]]: +; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 16, %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[VECTOR_MAIN_LOOP_ITER_CHECK]] ] +; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i64 [ [[TMP13]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[VECTOR_MAIN_LOOP_ITER_CHECK]] ] +; CHECK-NEXT: [[TMP16:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP17:%.*]] = mul nuw i64 [[TMP16]], 2 +; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 17, [[TMP17]] +; CHECK-NEXT: [[TMP18:%.*]] = icmp eq i64 [[N_MOD_VF]], 0 +; CHECK-NEXT: [[TMP19:%.*]] = select i1 [[TMP18]], i64 [[TMP17]], i64 [[N_MOD_VF]] +; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 17, [[TMP19]] +; CHECK-NEXT: [[TMP20:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP21:%.*]] = mul nuw i64 [[TMP20]], 2 +; CHECK-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 2 x i32> poison, i32 [[X]], i64 0 +; CHECK-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <vscale x 2 x i32> [[BROADCAST_SPLATINSERT1]], <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer +; CHECK-NEXT: [[TMP22:%.*]] = insertelement <vscale x 2 x i64> zeroinitializer, i64 [[BC_MERGE_RDX]], i32 0 +; CHECK-NEXT: [[TMP23:%.*]] = call <vscale x 2 x i32> @llvm.abs.nxv2i32(<vscale x 2 x i32> [[BROADCAST_SPLAT2]], i1 false) +; CHECK-NEXT: [[TMP24:%.*]] = call <vscale x 2 x i32> @llvm.abs.nxv2i32(<vscale x 2 x i32> [[BROADCAST_SPLAT2]], i1 false) +; CHECK-NEXT: [[TMP25:%.*]] = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64() +; CHECK-NEXT: [[BROADCAST_SPLATINSERT3:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[BC_RESUME_VAL]], i64 0 +; CHECK-NEXT: [[BROADCAST_SPLAT4:%.*]] = shufflevector <vscale x 2 x i64> [[BROADCAST_SPLATINSERT3]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer +; CHECK-NEXT: [[TMP38:%.*]] = mul <vscale x 2 x i64> [[TMP25]], splat (i64 1) +; CHECK-NEXT: [[INDUCTION:%.*]] = add <vscale x 2 x i64> [[BROADCAST_SPLAT4]], [[TMP38]] +; CHECK-NEXT: [[TMP39:%.*]] = mul i64 1, [[TMP21]] +; CHECK-NEXT: [[BROADCAST_SPLATINSERT5:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[TMP39]], i64 0 +; CHECK-NEXT: [[BROADCAST_SPLAT6:%.*]] = shufflevector <vscale x 2 x i64> [[BROADCAST_SPLATINSERT5]], <vscale x 2 x i64> poison, <vscale x 
2 x i32> zeroinitializer +; CHECK-NEXT: br label %[[VEC_EPILOG_VECTOR_BODY:.*]] +; CHECK: [[VEC_EPILOG_VECTOR_BODY]]: +; CHECK-NEXT: [[INDEX7:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT9:%.*]], %[[VEC_EPILOG_VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 2 x i64> [ [[INDUCTION]], %[[VEC_EPILOG_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VEC_EPILOG_VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_PHI8:%.*]] = phi <vscale x 2 x i64> [ [[TMP22]], %[[VEC_EPILOG_PH]] ], [ [[TMP35:%.*]], %[[VEC_EPILOG_VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP28:%.*]] = getelementptr { [4 x i8] }, ptr [[SRC]], <vscale x 2 x i64> [[VEC_IND]], i32 0, i64 3 +; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 2 x i8> @llvm.masked.gather.nxv2i8.nxv2p0(<vscale x 2 x ptr> [[TMP28]], i32 1, <vscale x 2 x i1> splat (i1 true), <vscale x 2 x i8> poison) +; CHECK-NEXT: [[TMP29:%.*]] = zext <vscale x 2 x i8> [[WIDE_MASKED_GATHER]] to <vscale x 2 x i32> +; CHECK-NEXT: [[TMP30:%.*]] = call <vscale x 2 x i32> @llvm.umin.nxv2i32(<vscale x 2 x i32> [[TMP23]], <vscale x 2 x i32> [[TMP29]]) +; CHECK-NEXT: [[TMP31:%.*]] = call <vscale x 2 x i32> @llvm.umin.nxv2i32(<vscale x 2 x i32> [[TMP24]], <vscale x 2 x i32> [[TMP30]]) +; CHECK-NEXT: [[TMP32:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 [[INDEX7]] +; CHECK-NEXT: [[TMP33:%.*]] = getelementptr inbounds i8, ptr [[TMP32]], i32 0 +; CHECK-NEXT: store <vscale x 2 x i8> zeroinitializer, ptr [[TMP33]], align 1 +; CHECK-NEXT: [[TMP34:%.*]] = zext <vscale x 2 x i32> [[TMP31]] to <vscale x 2 x i64> +; CHECK-NEXT: [[TMP35]] = or <vscale x 2 x i64> [[VEC_PHI8]], [[TMP34]] +; CHECK-NEXT: [[INDEX_NEXT9]] = add nuw i64 [[INDEX7]], [[TMP21]] +; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 2 x i64> [[VEC_IND]], [[BROADCAST_SPLAT6]] +; CHECK-NEXT: [[TMP36:%.*]] = icmp eq i64 [[INDEX_NEXT9]], [[N_VEC]] +; CHECK-NEXT: br i1 [[TMP36]], label %[[VEC_EPILOG_MIDDLE_BLOCK:.*]], label %[[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] +; CHECK: [[VEC_EPILOG_MIDDLE_BLOCK]]: +; CHECK-NEXT: [[TMP37:%.*]] = call i64 @llvm.vector.reduce.or.nxv2i64(<vscale x 2 x i64> [[TMP35]]) +; CHECK-NEXT: br label %[[VEC_EPILOG_SCALAR_PH]] +; CHECK: [[VEC_EPILOG_SCALAR_PH]]: +; CHECK-NEXT: [[BC_RESUME_VAL10:%.*]] = phi i64 [ [[N_VEC]], %[[VEC_EPILOG_MIDDLE_BLOCK]] ], [ 16, %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[ITER_CHECK]] ] +; CHECK-NEXT: [[BC_MERGE_RDX11:%.*]] = phi i64 [ [[TMP37]], %[[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[TMP13]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[ITER_CHECK]] ] +; CHECK-NEXT: br label %[[LOOP:.*]] +; CHECK: [[LOOP]]: +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL10]], %[[VEC_EPILOG_SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[RED:%.*]] = phi i64 [ [[BC_MERGE_RDX11]], %[[VEC_EPILOG_SCALAR_PH]] ], [ [[RED_NEXT:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[GEP_SRC_I_I:%.*]] = getelementptr { [4 x i8] }, ptr [[SRC]], i64 [[IV]], i32 0, i64 3 +; CHECK-NEXT: [[L:%.*]] = load i8, ptr [[GEP_SRC_I_I]], align 1 +; CHECK-NEXT: [[L_EXT:%.*]] = zext i8 [[L]] to i32 +; CHECK-NEXT: [[ABS_0:%.*]] = call i32 @llvm.abs.i32(i32 [[X]], i1 false) +; CHECK-NEXT: [[MIN_0:%.*]] = call i32 @llvm.umin.i32(i32 [[ABS_0]], i32 [[L_EXT]]) +; CHECK-NEXT: [[ABS_1:%.*]] = call i32 @llvm.abs.i32(i32 [[X]], i1 false) +; CHECK-NEXT: [[MIN_1:%.*]] = call i32 @llvm.umin.i32(i32 [[ABS_1]], i32 [[MIN_0]]) +; CHECK-NEXT: [[GEP_DST:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 [[IV]] +; CHECK-NEXT: store i8 0, ptr [[GEP_DST]], align 1 +; CHECK-NEXT: [[MIN_EXT:%.*]] = zext i32 [[MIN_1]] 
to i64 +; CHECK-NEXT: [[RED_NEXT]] = or i64 [[RED]], [[MIN_EXT]] +; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 +; CHECK-NEXT: [[EXITCOND_NOT_I_I:%.*]] = icmp eq i64 [[IV_NEXT]], 17 +; CHECK-NEXT: br i1 [[EXITCOND_NOT_I_I]], label %[[EXIT:.*]], label %[[LOOP]], !llvm.loop [[LOOP7:![0-9]+]] +; CHECK: [[EXIT]]: +; CHECK-NEXT: [[RED_NEXT_LCSSA:%.*]] = phi i64 [ [[RED_NEXT]], %[[LOOP]] ] +; CHECK-NEXT: ret i64 [[RED_NEXT_LCSSA]] +; +entry: + br label %loop + +loop: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ] + %red = phi i64 [ 0, %entry ], [ %red.next, %loop ] + %gep.src.i.i = getelementptr { [4 x i8] }, ptr %src, i64 %iv, i32 0, i64 3 + %l = load i8, ptr %gep.src.i.i, align 1 + %l.ext = zext i8 %l to i32 + %abs.0 = call i32 @llvm.abs.i32(i32 %x, i1 false) + %min.0 = call i32 @llvm.umin.i32(i32 %abs.0, i32 %l.ext) + %abs.1 = call i32 @llvm.abs.i32(i32 %x, i1 false) + %min.1 = call i32 @llvm.umin.i32(i32 %abs.1, i32 %min.0) + %gep.dst = getelementptr inbounds i8, ptr %dst, i64 %iv + store i8 0, ptr %gep.dst, align 1 + %min.ext = zext i32 %min.1 to i64 + %red.next = or i64 %red, %min.ext + %iv.next = add i64 %iv, 1 + %exitcond.not.i.i = icmp eq i64 %iv.next, 17 + br i1 %exitcond.not.i.i, label %exit, label %loop + +exit: + ret i64 %red.next +} + +; Test case for https://github.com/llvm/llvm-project/issues/149726. +define void @main_vector_loop_fixed_single_vector_iteration_with_runtime_checks(ptr noalias %A, ptr noalias %B, ptr noalias %C, ptr noalias %D, ptr noalias %E, ptr noalias %F, ptr noalias %G, ptr noalias %H, ptr noalias %I, ptr noalias %J, ptr noalias %K, ptr %L) #1 { +; CHECK-LABEL: define void @main_vector_loop_fixed_single_vector_iteration_with_runtime_checks( +; CHECK-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], ptr noalias [[C:%.*]], ptr noalias [[D:%.*]], ptr noalias [[E:%.*]], ptr noalias [[F:%.*]], ptr noalias [[G:%.*]], ptr noalias [[H:%.*]], ptr noalias [[I:%.*]], ptr noalias [[J:%.*]], ptr noalias [[K:%.*]], ptr [[L:%.*]]) #[[ATTR1:[0-9]+]] { +; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: br i1 true, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; CHECK: [[VECTOR_PH]]: +; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] +; CHECK: [[VECTOR_BODY]]: +; CHECK-NEXT: [[TMP0:%.*]] = getelementptr i64, ptr [[J]], i64 0 +; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <16 x i64>, ptr [[TMP0]], align 8 +; CHECK-NEXT: [[STRIDED_VEC:%.*]] = shufflevector <16 x i64> [[WIDE_VEC]], <16 x i64> poison, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14> +; CHECK-NEXT: [[TMP1:%.*]] = trunc <8 x i64> [[STRIDED_VEC]] to <8 x i16> +; CHECK-NEXT: [[TMP6:%.*]] = getelementptr i16, ptr [[K]], i64 0 +; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i16, ptr [[K]], i64 2 +; CHECK-NEXT: [[TMP8:%.*]] = getelementptr i16, ptr [[K]], i64 4 +; CHECK-NEXT: [[TMP9:%.*]] = getelementptr i16, ptr [[K]], i64 6 +; CHECK-NEXT: [[TMP10:%.*]] = getelementptr i16, ptr [[K]], i64 8 +; CHECK-NEXT: [[TMP11:%.*]] = getelementptr i16, ptr [[K]], i64 10 +; CHECK-NEXT: [[TMP12:%.*]] = getelementptr i16, ptr [[K]], i64 12 +; CHECK-NEXT: [[TMP13:%.*]] = getelementptr i16, ptr [[K]], i64 14 +; CHECK-NEXT: [[TMP14:%.*]] = extractelement <8 x i16> [[TMP1]], i32 0 +; CHECK-NEXT: store i16 [[TMP14]], ptr [[TMP6]], align 2 +; CHECK-NEXT: [[TMP15:%.*]] = extractelement <8 x i16> [[TMP1]], i32 1 +; CHECK-NEXT: store i16 [[TMP15]], ptr [[TMP7]], align 2 +; CHECK-NEXT: [[TMP16:%.*]] = extractelement <8 x i16> [[TMP1]], i32 2 +; CHECK-NEXT: store i16 [[TMP16]], ptr [[TMP8]], align 2 +; CHECK-NEXT: [[TMP17:%.*]] = 
extractelement <8 x i16> [[TMP1]], i32 3 +; CHECK-NEXT: store i16 [[TMP17]], ptr [[TMP9]], align 2 +; CHECK-NEXT: [[TMP18:%.*]] = extractelement <8 x i16> [[TMP1]], i32 4 +; CHECK-NEXT: store i16 [[TMP18]], ptr [[TMP10]], align 2 +; CHECK-NEXT: [[TMP19:%.*]] = extractelement <8 x i16> [[TMP1]], i32 5 +; CHECK-NEXT: store i16 [[TMP19]], ptr [[TMP11]], align 2 +; CHECK-NEXT: [[TMP20:%.*]] = extractelement <8 x i16> [[TMP1]], i32 6 +; CHECK-NEXT: store i16 [[TMP20]], ptr [[TMP12]], align 2 +; CHECK-NEXT: [[TMP21:%.*]] = extractelement <8 x i16> [[TMP1]], i32 7 +; CHECK-NEXT: store i16 [[TMP21]], ptr [[TMP13]], align 2 +; CHECK-NEXT: store i64 0, ptr [[A]], align 8 +; CHECK-NEXT: store i64 0, ptr [[B]], align 8 +; CHECK-NEXT: store i64 0, ptr [[C]], align 8 +; CHECK-NEXT: store i64 0, ptr [[D]], align 8 +; CHECK-NEXT: store i64 0, ptr [[E]], align 8 +; CHECK-NEXT: store i64 0, ptr [[F]], align 8 +; CHECK-NEXT: store i64 0, ptr [[G]], align 8 +; CHECK-NEXT: store i64 0, ptr [[H]], align 8 +; CHECK-NEXT: store i64 0, ptr [[I]], align 8 +; CHECK-NEXT: store i64 0, ptr [[L]], align 8 +; CHECK-NEXT: br label %[[MIDDLE_BLOCK:.*]] +; CHECK: [[MIDDLE_BLOCK]]: +; CHECK-NEXT: br label %[[SCALAR_PH]] +; CHECK: [[SCALAR_PH]]: +; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ] +; CHECK-NEXT: br label %[[LOOP:.*]] +; CHECK: [[LOOP]]: +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[GEP_J:%.*]] = getelementptr i64, ptr [[J]], i64 [[IV]] +; CHECK-NEXT: [[L_J:%.*]] = load i64, ptr [[GEP_J]], align 8 +; CHECK-NEXT: [[L_TRUNC:%.*]] = trunc i64 [[L_J]] to i16 +; CHECK-NEXT: [[GEP_K:%.*]] = getelementptr i16, ptr [[K]], i64 [[IV]] +; CHECK-NEXT: store i16 [[L_TRUNC]], ptr [[GEP_K]], align 2 +; CHECK-NEXT: store i64 0, ptr [[A]], align 8 +; CHECK-NEXT: store i64 0, ptr [[B]], align 8 +; CHECK-NEXT: store i64 0, ptr [[C]], align 8 +; CHECK-NEXT: store i64 0, ptr [[D]], align 8 +; CHECK-NEXT: store i64 0, ptr [[E]], align 8 +; CHECK-NEXT: store i64 0, ptr [[F]], align 8 +; CHECK-NEXT: store i64 0, ptr [[G]], align 8 +; CHECK-NEXT: store i64 0, ptr [[H]], align 8 +; CHECK-NEXT: store i64 0, ptr [[I]], align 8 +; CHECK-NEXT: store i64 0, ptr [[L]], align 8 +; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 2 +; CHECK-NEXT: [[EC:%.*]] = icmp ult i64 [[IV]], 14 +; CHECK-NEXT: br i1 [[EC]], label %[[LOOP]], label %[[EXIT:.*]], !llvm.loop [[LOOP8:![0-9]+]] +; CHECK: [[EXIT]]: +; CHECK-NEXT: ret void +; +entry: + br label %loop + +loop: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ] + %gep.J = getelementptr i64, ptr %J, i64 %iv + %l.J = load i64, ptr %gep.J, align 8 + %l.trunc = trunc i64 %l.J to i16 + %gep.K = getelementptr i16, ptr %K, i64 %iv + store i16 %l.trunc, ptr %gep.K, align 2 + store i64 0, ptr %A, align 8 + store i64 0, ptr %B, align 8 + store i64 0, ptr %C, align 8 + store i64 0, ptr %D, align 8 + store i64 0, ptr %E, align 8 + store i64 0, ptr %F, align 8 + store i64 0, ptr %G, align 8 + store i64 0, ptr %H, align 8 + store i64 0, ptr %I, align 8 + store i64 0, ptr %L, align 8 + %iv.next = add i64 %iv, 2 + %ec = icmp ult i64 %iv, 14 + br i1 %ec, label %loop, label %exit, !llvm.loop !0 + +exit: + ret void +} + +declare i32 @llvm.umin.i32(i32, i32) + +declare i32 @llvm.abs.i32(i32, i1 immarg) + +attributes #0 = { "target-cpu"="neoverse-512tvb" } +attributes #1 = { "target-cpu"="grace" } + +!0 = distinct !{!0, !1, !2} +!1 = !{!"llvm.loop.mustprogress"} +!2 = !{!"llvm.loop.vectorize.enable", i1 true} 
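For context on interleave-with-gaps.ll above: an interleave group "with gaps" forms when a loop touches only some members of each fixed-size element, here only byte 3 of every { [4 x i8] } element. The vectorizer still emits one wide load per group (the <64 x i8> load in the checks) and shuffles out the used lanes, so the final iteration's wide load could read past the end of %src; that is why the last iteration is forced into the scalar loop and the vector trip count is capped at one below the original trip count. A minimal C sketch of the access pattern (hypothetical, with the abs/umin reduction chain of the actual tests elided):

    // Hypothetical C equivalent of the strided accesses in the tests above.
    // Only b[3] of each 4-byte element is read, so vectorization forms an
    // interleave group with a gap of three unused bytes per member.
    struct S { unsigned char b[4]; };

    unsigned long reduce(const struct S *src, unsigned char *dst, unsigned n) {
      unsigned long red = 0;
      for (unsigned i = 0; i < n; i++) { // n == 17 in the tests above
        red |= src[i].b[3];              // stride-4 load with a gap
        dst[i] = 0;                      // contiguous store, no gap
      }
      return red;
    }
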
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-epilogue.ll b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-epilogue.ll index 400b031..7090ae8 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-epilogue.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-epilogue.ll @@ -7,11 +7,7 @@ target triple = "aarch64-none-unknown-elf" define i32 @dotp(ptr %a, ptr %b) #0 { ; CHECK-LABEL: define i32 @dotp( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0:[0-9]+]] { -; CHECK-NEXT: iter.check: -; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4 -; CHECK-NEXT: br i1 false, label [[VEC_EPILOG_SCALAR_PH:%.*]], label [[VECTOR_MAIN_LOOP_ITER_CHECK:%.*]] -; CHECK: vector.main.loop.iter.check: +; CHECK-NEXT: entry: ; CHECK-NEXT: br i1 false, label [[VEC_EPILOG_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK: vector.ph: ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] @@ -33,64 +29,8 @@ define i32 @dotp(ptr %a, ptr %b) #0 { ; CHECK-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[TMP11:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE]]) -; CHECK-NEXT: br i1 true, label [[FOR_EXIT:%.*]], label [[VEC_EPILOG_ITER_CHECK:%.*]] -; CHECK: vec.epilog.iter.check: -; CHECK-NEXT: [[TMP12:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP13:%.*]] = mul nuw i64 [[TMP12]], 4 -; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 0, [[TMP13]] -; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]] -; CHECK: vec.epilog.ph: -; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ 1024, [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ] -; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP11]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ] -; CHECK-NEXT: [[TMP14:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP15:%.*]] = mul nuw i64 [[TMP14]], 4 -; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP15]] -; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]] -; CHECK-NEXT: [[TMP16:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP17:%.*]] = mul nuw i64 [[TMP16]], 4 -; CHECK-NEXT: [[TMP18:%.*]] = insertelement <vscale x 4 x i32> zeroinitializer, i32 [[BC_MERGE_RDX]], i32 0 -; CHECK-NEXT: br label [[VEC_EPILOG_VECTOR_BODY:%.*]] -; CHECK: vec.epilog.vector.body: -; CHECK-NEXT: [[INDEX2:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], [[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT6:%.*]], [[VEC_EPILOG_VECTOR_BODY]] ] -; CHECK-NEXT: [[VEC_PHI3:%.*]] = phi <vscale x 4 x i32> [ [[TMP18]], [[VEC_EPILOG_PH]] ], [ [[TMP27:%.*]], [[VEC_EPILOG_VECTOR_BODY]] ] -; CHECK-NEXT: [[TMP20:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX2]] -; CHECK-NEXT: [[TMP21:%.*]] = getelementptr i8, ptr [[TMP20]], i32 0 -; CHECK-NEXT: [[WIDE_LOAD4:%.*]] = load <vscale x 4 x i8>, ptr [[TMP21]], align 1 -; CHECK-NEXT: [[TMP22:%.*]] = zext <vscale x 4 x i8> [[WIDE_LOAD4]] to <vscale x 4 x i32> -; CHECK-NEXT: [[TMP23:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX2]] -; CHECK-NEXT: [[TMP24:%.*]] = getelementptr i8, ptr [[TMP23]], i32 0 -; CHECK-NEXT: [[WIDE_LOAD5:%.*]] = load <vscale x 4 x i8>, ptr [[TMP24]], align 1 -; CHECK-NEXT: [[TMP25:%.*]] = zext <vscale x 4 x i8> [[WIDE_LOAD5]] to <vscale x 4 x i32> -; CHECK-NEXT: [[TMP26:%.*]] = mul <vscale x 4 x i32> [[TMP25]], [[TMP22]] 
-; CHECK-NEXT: [[TMP27]] = add <vscale x 4 x i32> [[TMP26]], [[VEC_PHI3]] -; CHECK-NEXT: [[INDEX_NEXT6]] = add nuw i64 [[INDEX2]], [[TMP17]] -; CHECK-NEXT: [[TMP28:%.*]] = icmp eq i64 [[INDEX_NEXT6]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP28]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] -; CHECK: vec.epilog.middle.block: -; CHECK-NEXT: [[TMP29:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP27]]) -; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]] -; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_EXIT]], label [[VEC_EPILOG_SCALAR_PH]] -; CHECK: vec.epilog.scalar.ph: -; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[VEC_EPILOG_MIDDLE_BLOCK]] ], [ 1024, [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[ITER_CHECK:%.*]] ] -; CHECK-NEXT: [[BC_MERGE_RDX7:%.*]] = phi i32 [ [[TMP29]], [[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[TMP11]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[ITER_CHECK]] ] -; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[VEC_EPILOG_SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; CHECK-NEXT: [[ACCUM:%.*]] = phi i32 [ [[BC_MERGE_RDX7]], [[VEC_EPILOG_SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ] -; CHECK-NEXT: [[GEP_A:%.*]] = getelementptr i8, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: [[LOAD_A:%.*]] = load i8, ptr [[GEP_A]], align 1 -; CHECK-NEXT: [[EXT_A:%.*]] = zext i8 [[LOAD_A]] to i32 -; CHECK-NEXT: [[GEP_B:%.*]] = getelementptr i8, ptr [[B]], i64 [[IV]] -; CHECK-NEXT: [[LOAD_B:%.*]] = load i8, ptr [[GEP_B]], align 1 -; CHECK-NEXT: [[EXT_B:%.*]] = zext i8 [[LOAD_B]] to i32 -; CHECK-NEXT: [[MUL:%.*]] = mul i32 [[EXT_B]], [[EXT_A]] -; CHECK-NEXT: [[ADD]] = add i32 [[MUL]], [[ACCUM]] -; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_EXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] -; CHECK: for.exit: -; CHECK-NEXT: [[ADD_LCSSA:%.*]] = phi i32 [ [[ADD]], [[FOR_BODY]] ], [ [[TMP11]], [[MIDDLE_BLOCK]] ], [ [[TMP29]], [[VEC_EPILOG_MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 [[ADD_LCSSA]] +; CHECK-NEXT: br i1 true, label [[FOR_EXIT:%.*]], label [[VEC_EPILOG_PH]] +; CHECK: scalar.ph: ; entry: br label %for.body @@ -142,7 +82,7 @@ define void @dotp_small_epilogue_vf(i64 %idx.neg, i8 %a) #1 { ; CHECK-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.experimental.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP4]]) ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 ; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[IV_NEXT]] -; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE]]) ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP0]], [[IV_NEXT]] @@ -174,7 +114,7 @@ define void @dotp_small_epilogue_vf(i64 %idx.neg, i8 %a) #1 { ; CHECK-NEXT: [[TMP13]] = add <4 x i32> [[TMP14]], [[VEC_PHI9]] ; CHECK-NEXT: [[INDEX_NEXT14]] = add nuw i64 [[INDEX9]], 4 ; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT14]], [[N_VEC5]] -; CHECK-NEXT: br i1 [[TMP12]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP12]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], 
label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; CHECK: vec.epilog.middle.block: ; CHECK-NEXT: [[TMP15:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP13]]) ; CHECK-NEXT: [[CMP_N15:%.*]] = icmp eq i64 [[TMP0]], [[N_VEC5]] @@ -198,7 +138,7 @@ define void @dotp_small_epilogue_vf(i64 %idx.neg, i8 %a) #1 { ; CHECK-NEXT: [[CMP_IV_NEG:%.*]] = icmp ugt i64 [[IV_NEG]], 0 ; CHECK-NEXT: [[CMP_IV:%.*]] = icmp ne i64 [[ACCUM1]], -1 ; CHECK-NEXT: [[EXITCOND:%.*]] = and i1 [[CMP_IV_NEG]], [[CMP_IV]] -; CHECK-NEXT: br i1 [[EXITCOND]], label [[WHILE_BODY1]], label [[WHILE_END_LOOPEXIT]], !llvm.loop [[LOOP7:![0-9]+]] +; CHECK-NEXT: br i1 [[EXITCOND]], label [[WHILE_BODY1]], label [[WHILE_END_LOOPEXIT]], !llvm.loop [[LOOP6:![0-9]+]] ; CHECK: while.end.loopexit: ; CHECK-NEXT: [[RESULT:%.*]] = phi i32 [ [[ADD]], [[WHILE_BODY1]] ], [ [[TMP6]], [[MIDDLE_BLOCK]] ], [ [[TMP15]], [[VEC_EPILOG_MIDDLE_BLOCK]] ] ; CHECK-NEXT: ret void @@ -557,7 +497,7 @@ define i32 @dotp_predicated(i64 %N, ptr %a, ptr %b) { ; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 16 ; CHECK-NEXT: [[VEC_IND_NEXT]] = add <16 x i64> [[VEC_IND]], splat (i64 16) ; CHECK-NEXT: [[TMP181:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP181]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP181]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[TMP182:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE]]) ; CHECK-NEXT: br label [[EXIT:%.*]] diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-epilog-vect-no-remaining-iterations.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-epilog-vect-no-remaining-iterations.ll deleted file mode 100644 index d85bc48..0000000 --- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-epilog-vect-no-remaining-iterations.ll +++ /dev/null @@ -1,146 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals none --version 5 -; RUN: opt -passes=loop-vectorize -S %s | FileCheck %s - -target triple = "aarch64-linux-gnu" - -define i64 @main_vector_loop_fixed_with_no_remaining_iterations(ptr %src, ptr noalias %dst, i32 %x) #0 { -; CHECK-LABEL: define i64 @main_vector_loop_fixed_with_no_remaining_iterations( -; CHECK-SAME: ptr [[SRC:%.*]], ptr noalias [[DST:%.*]], i32 [[X:%.*]]) #[[ATTR0:[0-9]+]] { -; CHECK-NEXT: [[ITER_CHECK:.*]]: -; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2 -; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ule i64 16, [[TMP3]] -; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[VEC_EPILOG_SCALAR_PH:.*]], label %[[VECTOR_MAIN_LOOP_ITER_CHECK:.*]] -; CHECK: [[VECTOR_MAIN_LOOP_ITER_CHECK]]: -; CHECK-NEXT: br i1 true, label %[[VEC_EPILOG_PH:.*]], label %[[VECTOR_PH:.*]] -; CHECK: [[VECTOR_PH]]: -; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <16 x i32> poison, i32 [[X]], i64 0 -; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <16 x i32> [[BROADCAST_SPLATINSERT]], <16 x i32> poison, <16 x i32> zeroinitializer -; CHECK-NEXT: [[TMP0:%.*]] = call <16 x i32> @llvm.abs.v16i32(<16 x i32> [[BROADCAST_SPLAT]], i1 false) -; CHECK-NEXT: [[TMP1:%.*]] = call <16 x i32> @llvm.abs.v16i32(<16 x i32> [[BROADCAST_SPLAT]], i1 false) -; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] -; CHECK: [[VECTOR_BODY]]: -; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], 
%[[VECTOR_BODY]] ] -; CHECK-NEXT: [[VEC_PHI1:%.*]] = phi <16 x i64> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP17:%.*]], %[[VECTOR_BODY]] ] -; CHECK-NEXT: [[TMP4:%.*]] = getelementptr { [4 x i8] }, ptr [[SRC]], i64 [[INDEX]], i32 0, i64 3 -; CHECK-NEXT: [[WIDE_VEC2:%.*]] = load <64 x i8>, ptr [[TMP4]], align 1 -; CHECK-NEXT: [[STRIDED_VEC3:%.*]] = shufflevector <64 x i8> [[WIDE_VEC2]], <64 x i8> poison, <16 x i32> <i32 0, i32 4, i32 8, i32 12, i32 16, i32 20, i32 24, i32 28, i32 32, i32 36, i32 40, i32 44, i32 48, i32 52, i32 56, i32 60> -; CHECK-NEXT: [[TMP6:%.*]] = zext <16 x i8> [[STRIDED_VEC3]] to <16 x i32> -; CHECK-NEXT: [[TMP8:%.*]] = call <16 x i32> @llvm.umin.v16i32(<16 x i32> [[TMP0]], <16 x i32> [[TMP6]]) -; CHECK-NEXT: [[TMP10:%.*]] = call <16 x i32> @llvm.umin.v16i32(<16 x i32> [[TMP1]], <16 x i32> [[TMP8]]) -; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds i8, ptr [[TMP11]], i32 0 -; CHECK-NEXT: store <16 x i8> zeroinitializer, ptr [[TMP12]], align 1 -; CHECK-NEXT: [[TMP15:%.*]] = zext <16 x i32> [[TMP10]] to <16 x i64> -; CHECK-NEXT: [[TMP17]] = or <16 x i64> [[VEC_PHI1]], [[TMP15]] -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 -; CHECK-NEXT: br i1 true, label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] -; CHECK: [[MIDDLE_BLOCK]]: -; CHECK-NEXT: [[TMP18:%.*]] = call i64 @llvm.vector.reduce.or.v16i64(<16 x i64> [[TMP17]]) -; CHECK-NEXT: br label %[[VEC_EPILOG_ITER_CHECK:.*]] -; CHECK: [[VEC_EPILOG_ITER_CHECK]]: -; CHECK-NEXT: [[TMP13:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP14:%.*]] = mul nuw i64 [[TMP13]], 2 -; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ule i64 16, [[TMP14]] -; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label %[[VEC_EPILOG_SCALAR_PH]], label %[[VEC_EPILOG_PH]] -; CHECK: [[VEC_EPILOG_PH]]: -; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ 0, %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[VECTOR_MAIN_LOOP_ITER_CHECK]] ] -; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i64 [ [[TMP18]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[VECTOR_MAIN_LOOP_ITER_CHECK]] ] -; CHECK-NEXT: [[TMP31:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP16:%.*]] = mul nuw i64 [[TMP31]], 2 -; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 16, [[TMP16]] -; CHECK-NEXT: [[TMP32:%.*]] = icmp eq i64 [[N_MOD_VF]], 0 -; CHECK-NEXT: [[TMP36:%.*]] = select i1 [[TMP32]], i64 [[TMP16]], i64 [[N_MOD_VF]] -; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 16, [[TMP36]] -; CHECK-NEXT: [[TMP19:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP20:%.*]] = mul nuw i64 [[TMP19]], 2 -; CHECK-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 2 x i32> poison, i32 [[X]], i64 0 -; CHECK-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <vscale x 2 x i32> [[BROADCAST_SPLATINSERT1]], <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer -; CHECK-NEXT: [[TMP21:%.*]] = insertelement <vscale x 2 x i64> zeroinitializer, i64 [[BC_MERGE_RDX]], i32 0 -; CHECK-NEXT: [[TMP22:%.*]] = call <vscale x 2 x i32> @llvm.abs.nxv2i32(<vscale x 2 x i32> [[BROADCAST_SPLAT2]], i1 false) -; CHECK-NEXT: [[TMP23:%.*]] = call <vscale x 2 x i32> @llvm.abs.nxv2i32(<vscale x 2 x i32> [[BROADCAST_SPLAT2]], i1 false) -; CHECK-NEXT: [[TMP24:%.*]] = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64() -; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[VEC_EPILOG_RESUME_VAL]], i64 0 -; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> 
[[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer -; CHECK-NEXT: [[TMP25:%.*]] = mul <vscale x 2 x i64> [[TMP24]], splat (i64 1) -; CHECK-NEXT: [[INDUCTION:%.*]] = add <vscale x 2 x i64> [[DOTSPLAT]], [[TMP25]] -; CHECK-NEXT: [[TMP37:%.*]] = mul i64 1, [[TMP20]] -; CHECK-NEXT: [[DOTSPLATINSERT4:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[TMP37]], i64 0 -; CHECK-NEXT: [[DOTSPLAT5:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT4]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer -; CHECK-NEXT: br label %[[VEC_EPILOG_VECTOR_BODY:.*]] -; CHECK: [[VEC_EPILOG_VECTOR_BODY]]: -; CHECK-NEXT: [[INDEX6:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], %[[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT10:%.*]], %[[VEC_EPILOG_VECTOR_BODY]] ] -; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 2 x i64> [ [[INDUCTION]], %[[VEC_EPILOG_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VEC_EPILOG_VECTOR_BODY]] ] -; CHECK-NEXT: [[VEC_PHI6:%.*]] = phi <vscale x 2 x i64> [ [[TMP21]], %[[VEC_EPILOG_PH]] ], [ [[TMP34:%.*]], %[[VEC_EPILOG_VECTOR_BODY]] ] -; CHECK-NEXT: [[TMP38:%.*]] = getelementptr { [4 x i8] }, ptr [[SRC]], <vscale x 2 x i64> [[VEC_IND]], i32 0, i64 3 -; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 2 x i8> @llvm.masked.gather.nxv2i8.nxv2p0(<vscale x 2 x ptr> [[TMP38]], i32 1, <vscale x 2 x i1> splat (i1 true), <vscale x 2 x i8> poison) -; CHECK-NEXT: [[TMP28:%.*]] = zext <vscale x 2 x i8> [[WIDE_MASKED_GATHER]] to <vscale x 2 x i32> -; CHECK-NEXT: [[TMP29:%.*]] = call <vscale x 2 x i32> @llvm.umin.nxv2i32(<vscale x 2 x i32> [[TMP22]], <vscale x 2 x i32> [[TMP28]]) -; CHECK-NEXT: [[TMP39:%.*]] = call <vscale x 2 x i32> @llvm.umin.nxv2i32(<vscale x 2 x i32> [[TMP23]], <vscale x 2 x i32> [[TMP29]]) -; CHECK-NEXT: [[TMP26:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 [[INDEX6]] -; CHECK-NEXT: [[TMP27:%.*]] = getelementptr inbounds i8, ptr [[TMP26]], i32 0 -; CHECK-NEXT: store <vscale x 2 x i8> zeroinitializer, ptr [[TMP27]], align 1 -; CHECK-NEXT: [[TMP33:%.*]] = zext <vscale x 2 x i32> [[TMP39]] to <vscale x 2 x i64> -; CHECK-NEXT: [[TMP34]] = or <vscale x 2 x i64> [[VEC_PHI6]], [[TMP33]] -; CHECK-NEXT: [[INDEX_NEXT10]] = add nuw i64 [[INDEX6]], [[TMP20]] -; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 2 x i64> [[VEC_IND]], [[DOTSPLAT5]] -; CHECK-NEXT: [[TMP35:%.*]] = icmp eq i64 [[INDEX_NEXT10]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP35]], label %[[VEC_EPILOG_MIDDLE_BLOCK:.*]], label %[[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] -; CHECK: [[VEC_EPILOG_MIDDLE_BLOCK]]: -; CHECK-NEXT: [[TMP30:%.*]] = call i64 @llvm.vector.reduce.or.nxv2i64(<vscale x 2 x i64> [[TMP34]]) -; CHECK-NEXT: br label %[[VEC_EPILOG_SCALAR_PH]] -; CHECK: [[VEC_EPILOG_SCALAR_PH]]: -; CHECK-NEXT: [[BC_RESUME_VAL8:%.*]] = phi i64 [ [[N_VEC]], %[[VEC_EPILOG_MIDDLE_BLOCK]] ], [ 0, %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[ITER_CHECK]] ] -; CHECK-NEXT: [[BC_MERGE_RDX9:%.*]] = phi i64 [ [[TMP30]], %[[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[TMP18]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[ITER_CHECK]] ] -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL8]], %[[VEC_EPILOG_SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[RED:%.*]] = phi i64 [ [[BC_MERGE_RDX9]], %[[VEC_EPILOG_SCALAR_PH]] ], [ [[RED_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[GEP_SRC_I_I:%.*]] = getelementptr { [4 x i8] }, ptr [[SRC]], i64 [[IV]], i32 0, i64 3 -; CHECK-NEXT: [[L:%.*]] = load i8, ptr [[GEP_SRC_I_I]], align 1 -; CHECK-NEXT: [[L_EXT:%.*]] = zext 
i8 [[L]] to i32 -; CHECK-NEXT: [[ABS_0:%.*]] = call i32 @llvm.abs.i32(i32 [[X]], i1 false) -; CHECK-NEXT: [[MIN_0:%.*]] = call i32 @llvm.umin.i32(i32 [[ABS_0]], i32 [[L_EXT]]) -; CHECK-NEXT: [[ABS_1:%.*]] = call i32 @llvm.abs.i32(i32 [[X]], i1 false) -; CHECK-NEXT: [[MIN_1:%.*]] = call i32 @llvm.umin.i32(i32 [[ABS_1]], i32 [[MIN_0]]) -; CHECK-NEXT: [[GEP_DST:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 [[IV]] -; CHECK-NEXT: store i8 0, ptr [[GEP_DST]], align 1 -; CHECK-NEXT: [[MIN_EXT:%.*]] = zext i32 [[MIN_1]] to i64 -; CHECK-NEXT: [[RED_NEXT]] = or i64 [[RED]], [[MIN_EXT]] -; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; CHECK-NEXT: [[EXITCOND_NOT_I_I:%.*]] = icmp eq i64 [[IV_NEXT]], 16 -; CHECK-NEXT: br i1 [[EXITCOND_NOT_I_I]], label %[[EXIT:.*]], label %[[LOOP]], !llvm.loop [[LOOP4:![0-9]+]] -; CHECK: [[EXIT]]: -; CHECK-NEXT: [[RED_NEXT_LCSSA:%.*]] = phi i64 [ [[RED_NEXT]], %[[LOOP]] ] -; CHECK-NEXT: ret i64 [[RED_NEXT_LCSSA]] -; -entry: - br label %loop - -loop: - %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ] - %red = phi i64 [ 0, %entry ], [ %red.next, %loop ] - %gep.src.i.i = getelementptr { [4 x i8] }, ptr %src, i64 %iv, i32 0, i64 3 - %l = load i8, ptr %gep.src.i.i, align 1 - %l.ext = zext i8 %l to i32 - %abs.0 = call i32 @llvm.abs.i32(i32 %x, i1 false) - %min.0 = call i32 @llvm.umin.i32(i32 %abs.0, i32 %l.ext) - %abs.1 = call i32 @llvm.abs.i32(i32 %x, i1 false) - %min.1 = call i32 @llvm.umin.i32(i32 %abs.1, i32 %min.0) - %gep.dst = getelementptr inbounds i8, ptr %dst, i64 %iv - store i8 0, ptr %gep.dst, align 1 - %min.ext = zext i32 %min.1 to i64 - %red.next = or i64 %red, %min.ext - %iv.next = add i64 %iv, 1 - %exitcond.not.i.i = icmp eq i64 %iv.next, 16 - br i1 %exitcond.not.i.i, label %exit, label %loop - -exit: - ret i64 %red.next -} - -declare i32 @llvm.umin.i32(i32, i32) - -declare i32 @llvm.abs.i32(i32, i1 immarg) - -attributes #0 = { "target-cpu"="neoverse-512tvb" } diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/interleaved-masked-access.ll b/llvm/test/Transforms/LoopVectorize/RISCV/interleaved-masked-access.ll index 45357dd..dbe6f27 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/interleaved-masked-access.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/interleaved-masked-access.ll @@ -1,7 +1,7 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --filter-out-after "^scalar.ph:" --version 2 ; RUN: opt -mtriple=riscv64-none-linux-gnu -S -passes=loop-vectorize,instcombine -mattr=+v -prefer-predicate-over-epilogue=scalar-epilogue %s 2>&1 | FileCheck %s -check-prefix=SCALAR_EPILOGUE -; RUN: opt -mtriple=riscv64-none-linux-gnu -S -passes=loop-vectorize,instcombine -mattr=+v -prefer-predicate-over-epilogue=predicate-dont-vectorize %s 2>&1 | FileCheck %s -check-prefix=PREDICATED_TAIL_FOLDING -; RUN: opt -mtriple=riscv64-none-linux-gnu -S -passes=loop-vectorize,instcombine -mattr=+v -prefer-predicate-over-epilogue=predicate-dont-vectorize -force-tail-folding-style=data-with-evl %s 2>&1 | FileCheck %s -check-prefix=PREDICATED_EVL +; RUN: opt -mtriple=riscv64-none-linux-gnu -S -passes=loop-vectorize,instcombine -mattr=+v -prefer-predicate-over-epilogue=predicate-dont-vectorize -force-tail-folding-style=data %s 2>&1 | FileCheck %s -check-prefix=PREDICATED_DATA +; RUN: opt -mtriple=riscv64-none-linux-gnu -S -passes=loop-vectorize,instcombine -mattr=+v -prefer-predicate-over-epilogue=predicate-dont-vectorize -force-tail-folding-style=data-with-evl %s 2>&1 | FileCheck %s -check-prefix=PREDICATED_DATA-WITH-EVL target datalayout 
= "e-m:e-i64:64-i128:128-n32:64-S128" @@ -55,105 +55,105 @@ define void @masked_strided_factor2(ptr noalias nocapture readonly %p, ptr noali ; SCALAR_EPILOGUE-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]] ; SCALAR_EPILOGUE: scalar.ph: ; -; PREDICATED_TAIL_FOLDING-LABEL: define void @masked_strided_factor2 -; PREDICATED_TAIL_FOLDING-SAME: (ptr noalias readonly captures(none) [[P:%.*]], ptr noalias captures(none) [[Q:%.*]], i8 zeroext [[GUARD:%.*]]) #[[ATTR0:[0-9]+]] { -; PREDICATED_TAIL_FOLDING-NEXT: entry: -; PREDICATED_TAIL_FOLDING-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] -; PREDICATED_TAIL_FOLDING: vector.ph: -; PREDICATED_TAIL_FOLDING-NEXT: [[CONV:%.*]] = zext i8 [[GUARD]] to i32 -; PREDICATED_TAIL_FOLDING-NEXT: [[TMP0:%.*]] = call i32 @llvm.vscale.i32() -; PREDICATED_TAIL_FOLDING-NEXT: [[TMP1:%.*]] = shl nuw i32 [[TMP0]], 4 -; PREDICATED_TAIL_FOLDING-NEXT: [[N_RND_UP:%.*]] = add i32 [[TMP1]], 1023 -; PREDICATED_TAIL_FOLDING-NEXT: [[N_MOD_VF:%.*]] = urem i32 [[N_RND_UP]], [[TMP1]] -; PREDICATED_TAIL_FOLDING-NEXT: [[N_VEC:%.*]] = sub i32 [[N_RND_UP]], [[N_MOD_VF]] -; PREDICATED_TAIL_FOLDING-NEXT: [[TMP2:%.*]] = call i32 @llvm.vscale.i32() -; PREDICATED_TAIL_FOLDING-NEXT: [[TMP3:%.*]] = shl nuw i32 [[TMP2]], 4 -; PREDICATED_TAIL_FOLDING-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 16 x i32> poison, i32 [[CONV]], i64 0 -; PREDICATED_TAIL_FOLDING-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 16 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer -; PREDICATED_TAIL_FOLDING-NEXT: [[TMP4:%.*]] = call <vscale x 16 x i32> @llvm.stepvector.nxv16i32() -; PREDICATED_TAIL_FOLDING-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 16 x i32> poison, i32 [[TMP3]], i64 0 -; PREDICATED_TAIL_FOLDING-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <vscale x 16 x i32> [[BROADCAST_SPLATINSERT1]], <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer -; PREDICATED_TAIL_FOLDING-NEXT: br label [[VECTOR_BODY:%.*]] -; PREDICATED_TAIL_FOLDING: vector.body: -; PREDICATED_TAIL_FOLDING-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; PREDICATED_TAIL_FOLDING-NEXT: [[VEC_IND:%.*]] = phi <vscale x 16 x i32> [ [[TMP4]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ] -; PREDICATED_TAIL_FOLDING-NEXT: [[ACTIVE_LANE_MASK:%.*]] = call <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i32(i32 [[INDEX]], i32 1024) -; PREDICATED_TAIL_FOLDING-NEXT: [[TMP5:%.*]] = icmp ugt <vscale x 16 x i32> [[VEC_IND]], [[BROADCAST_SPLAT]] -; PREDICATED_TAIL_FOLDING-NEXT: [[TMP6:%.*]] = select <vscale x 16 x i1> [[ACTIVE_LANE_MASK]], <vscale x 16 x i1> [[TMP5]], <vscale x 16 x i1> zeroinitializer -; PREDICATED_TAIL_FOLDING-NEXT: [[TMP7:%.*]] = shl nuw nsw <vscale x 16 x i32> [[VEC_IND]], splat (i32 1) -; PREDICATED_TAIL_FOLDING-NEXT: [[TMP8:%.*]] = zext nneg <vscale x 16 x i32> [[TMP7]] to <vscale x 16 x i64> -; PREDICATED_TAIL_FOLDING-NEXT: [[TMP9:%.*]] = getelementptr inbounds i8, ptr [[P]], <vscale x 16 x i64> [[TMP8]] -; PREDICATED_TAIL_FOLDING-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 16 x i8> @llvm.masked.gather.nxv16i8.nxv16p0(<vscale x 16 x ptr> [[TMP9]], i32 1, <vscale x 16 x i1> [[TMP6]], <vscale x 16 x i8> poison) -; PREDICATED_TAIL_FOLDING-NEXT: [[TMP10:%.*]] = or disjoint <vscale x 16 x i32> [[TMP7]], splat (i32 1) -; PREDICATED_TAIL_FOLDING-NEXT: [[TMP11:%.*]] = zext nneg <vscale x 16 x i32> [[TMP10]] to <vscale x 16 x i64> 
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP12:%.*]] = getelementptr inbounds i8, ptr [[P]], <vscale x 16 x i64> [[TMP11]] -; PREDICATED_TAIL_FOLDING-NEXT: [[WIDE_MASKED_GATHER3:%.*]] = call <vscale x 16 x i8> @llvm.masked.gather.nxv16i8.nxv16p0(<vscale x 16 x ptr> [[TMP12]], i32 1, <vscale x 16 x i1> [[TMP6]], <vscale x 16 x i8> poison) -; PREDICATED_TAIL_FOLDING-NEXT: [[TMP13:%.*]] = call <vscale x 16 x i8> @llvm.smax.nxv16i8(<vscale x 16 x i8> [[WIDE_MASKED_GATHER]], <vscale x 16 x i8> [[WIDE_MASKED_GATHER3]]) -; PREDICATED_TAIL_FOLDING-NEXT: [[TMP14:%.*]] = zext nneg <vscale x 16 x i32> [[TMP7]] to <vscale x 16 x i64> -; PREDICATED_TAIL_FOLDING-NEXT: [[TMP15:%.*]] = getelementptr inbounds i8, ptr [[Q]], <vscale x 16 x i64> [[TMP14]] -; PREDICATED_TAIL_FOLDING-NEXT: call void @llvm.masked.scatter.nxv16i8.nxv16p0(<vscale x 16 x i8> [[TMP13]], <vscale x 16 x ptr> [[TMP15]], i32 1, <vscale x 16 x i1> [[TMP6]]) -; PREDICATED_TAIL_FOLDING-NEXT: [[TMP16:%.*]] = sub <vscale x 16 x i8> zeroinitializer, [[TMP13]] -; PREDICATED_TAIL_FOLDING-NEXT: [[TMP17:%.*]] = zext nneg <vscale x 16 x i32> [[TMP10]] to <vscale x 16 x i64> -; PREDICATED_TAIL_FOLDING-NEXT: [[TMP18:%.*]] = getelementptr inbounds i8, ptr [[Q]], <vscale x 16 x i64> [[TMP17]] -; PREDICATED_TAIL_FOLDING-NEXT: call void @llvm.masked.scatter.nxv16i8.nxv16p0(<vscale x 16 x i8> [[TMP16]], <vscale x 16 x ptr> [[TMP18]], i32 1, <vscale x 16 x i1> [[TMP6]]) -; PREDICATED_TAIL_FOLDING-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP3]] -; PREDICATED_TAIL_FOLDING-NEXT: [[VEC_IND_NEXT]] = add <vscale x 16 x i32> [[VEC_IND]], [[BROADCAST_SPLAT2]] -; PREDICATED_TAIL_FOLDING-NEXT: [[TMP19:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] -; PREDICATED_TAIL_FOLDING-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] -; PREDICATED_TAIL_FOLDING: middle.block: -; PREDICATED_TAIL_FOLDING-NEXT: br label [[FOR_END:%.*]] -; PREDICATED_TAIL_FOLDING: scalar.ph: +; PREDICATED_DATA-LABEL: define void @masked_strided_factor2 +; PREDICATED_DATA-SAME: (ptr noalias readonly captures(none) [[P:%.*]], ptr noalias captures(none) [[Q:%.*]], i8 zeroext [[GUARD:%.*]]) #[[ATTR0:[0-9]+]] { +; PREDICATED_DATA-NEXT: entry: +; PREDICATED_DATA-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] +; PREDICATED_DATA: vector.ph: +; PREDICATED_DATA-NEXT: [[CONV:%.*]] = zext i8 [[GUARD]] to i32 +; PREDICATED_DATA-NEXT: [[TMP0:%.*]] = call i32 @llvm.vscale.i32() +; PREDICATED_DATA-NEXT: [[TMP1:%.*]] = shl nuw i32 [[TMP0]], 4 +; PREDICATED_DATA-NEXT: [[N_RND_UP:%.*]] = add i32 [[TMP1]], 1023 +; PREDICATED_DATA-NEXT: [[N_MOD_VF:%.*]] = urem i32 [[N_RND_UP]], [[TMP1]] +; PREDICATED_DATA-NEXT: [[N_VEC:%.*]] = sub i32 [[N_RND_UP]], [[N_MOD_VF]] +; PREDICATED_DATA-NEXT: [[TMP2:%.*]] = call i32 @llvm.vscale.i32() +; PREDICATED_DATA-NEXT: [[TMP3:%.*]] = shl nuw i32 [[TMP2]], 4 +; PREDICATED_DATA-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 16 x i32> poison, i32 [[CONV]], i64 0 +; PREDICATED_DATA-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 16 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer +; PREDICATED_DATA-NEXT: [[TMP4:%.*]] = call <vscale x 16 x i32> @llvm.stepvector.nxv16i32() +; PREDICATED_DATA-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 16 x i32> poison, i32 [[TMP3]], i64 0 +; PREDICATED_DATA-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <vscale x 16 x i32> [[BROADCAST_SPLATINSERT1]], <vscale x 16 x i32> poison, <vscale x 16 x 
i32> zeroinitializer +; PREDICATED_DATA-NEXT: br label [[VECTOR_BODY:%.*]] +; PREDICATED_DATA: vector.body: +; PREDICATED_DATA-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] +; PREDICATED_DATA-NEXT: [[VEC_IND:%.*]] = phi <vscale x 16 x i32> [ [[TMP4]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ] +; PREDICATED_DATA-NEXT: [[ACTIVE_LANE_MASK:%.*]] = call <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i32(i32 [[INDEX]], i32 1024) +; PREDICATED_DATA-NEXT: [[TMP5:%.*]] = icmp ugt <vscale x 16 x i32> [[VEC_IND]], [[BROADCAST_SPLAT]] +; PREDICATED_DATA-NEXT: [[TMP6:%.*]] = select <vscale x 16 x i1> [[ACTIVE_LANE_MASK]], <vscale x 16 x i1> [[TMP5]], <vscale x 16 x i1> zeroinitializer +; PREDICATED_DATA-NEXT: [[TMP7:%.*]] = shl nuw nsw <vscale x 16 x i32> [[VEC_IND]], splat (i32 1) +; PREDICATED_DATA-NEXT: [[TMP8:%.*]] = zext nneg <vscale x 16 x i32> [[TMP7]] to <vscale x 16 x i64> +; PREDICATED_DATA-NEXT: [[TMP9:%.*]] = getelementptr inbounds i8, ptr [[P]], <vscale x 16 x i64> [[TMP8]] +; PREDICATED_DATA-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 16 x i8> @llvm.masked.gather.nxv16i8.nxv16p0(<vscale x 16 x ptr> [[TMP9]], i32 1, <vscale x 16 x i1> [[TMP6]], <vscale x 16 x i8> poison) +; PREDICATED_DATA-NEXT: [[TMP10:%.*]] = or disjoint <vscale x 16 x i32> [[TMP7]], splat (i32 1) +; PREDICATED_DATA-NEXT: [[TMP11:%.*]] = zext nneg <vscale x 16 x i32> [[TMP10]] to <vscale x 16 x i64> +; PREDICATED_DATA-NEXT: [[TMP12:%.*]] = getelementptr inbounds i8, ptr [[P]], <vscale x 16 x i64> [[TMP11]] +; PREDICATED_DATA-NEXT: [[WIDE_MASKED_GATHER3:%.*]] = call <vscale x 16 x i8> @llvm.masked.gather.nxv16i8.nxv16p0(<vscale x 16 x ptr> [[TMP12]], i32 1, <vscale x 16 x i1> [[TMP6]], <vscale x 16 x i8> poison) +; PREDICATED_DATA-NEXT: [[TMP13:%.*]] = call <vscale x 16 x i8> @llvm.smax.nxv16i8(<vscale x 16 x i8> [[WIDE_MASKED_GATHER]], <vscale x 16 x i8> [[WIDE_MASKED_GATHER3]]) +; PREDICATED_DATA-NEXT: [[TMP14:%.*]] = zext nneg <vscale x 16 x i32> [[TMP7]] to <vscale x 16 x i64> +; PREDICATED_DATA-NEXT: [[TMP15:%.*]] = getelementptr inbounds i8, ptr [[Q]], <vscale x 16 x i64> [[TMP14]] +; PREDICATED_DATA-NEXT: call void @llvm.masked.scatter.nxv16i8.nxv16p0(<vscale x 16 x i8> [[TMP13]], <vscale x 16 x ptr> [[TMP15]], i32 1, <vscale x 16 x i1> [[TMP6]]) +; PREDICATED_DATA-NEXT: [[TMP16:%.*]] = sub <vscale x 16 x i8> zeroinitializer, [[TMP13]] +; PREDICATED_DATA-NEXT: [[TMP17:%.*]] = zext nneg <vscale x 16 x i32> [[TMP10]] to <vscale x 16 x i64> +; PREDICATED_DATA-NEXT: [[TMP18:%.*]] = getelementptr inbounds i8, ptr [[Q]], <vscale x 16 x i64> [[TMP17]] +; PREDICATED_DATA-NEXT: call void @llvm.masked.scatter.nxv16i8.nxv16p0(<vscale x 16 x i8> [[TMP16]], <vscale x 16 x ptr> [[TMP18]], i32 1, <vscale x 16 x i1> [[TMP6]]) +; PREDICATED_DATA-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP3]] +; PREDICATED_DATA-NEXT: [[VEC_IND_NEXT]] = add <vscale x 16 x i32> [[VEC_IND]], [[BROADCAST_SPLAT2]] +; PREDICATED_DATA-NEXT: [[TMP19:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] +; PREDICATED_DATA-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; PREDICATED_DATA: middle.block: +; PREDICATED_DATA-NEXT: br label [[FOR_END:%.*]] +; PREDICATED_DATA: scalar.ph: ; -; PREDICATED_EVL-LABEL: define void @masked_strided_factor2 -; PREDICATED_EVL-SAME: (ptr noalias readonly captures(none) [[P:%.*]], ptr noalias captures(none) [[Q:%.*]], i8 zeroext [[GUARD:%.*]]) #[[ATTR0:[0-9]+]] { -; PREDICATED_EVL-NEXT: entry: -; 
PREDICATED_EVL-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
-; PREDICATED_EVL: vector.ph:
-; PREDICATED_EVL-NEXT: [[CONV:%.*]] = zext i8 [[GUARD]] to i32
-; PREDICATED_EVL-NEXT: [[TMP0:%.*]] = call i32 @llvm.vscale.i32()
-; PREDICATED_EVL-NEXT: [[TMP1:%.*]] = shl nuw i32 [[TMP0]], 4
-; PREDICATED_EVL-NEXT: [[N_RND_UP:%.*]] = add i32 [[TMP1]], 1023
-; PREDICATED_EVL-NEXT: [[N_MOD_VF:%.*]] = urem i32 [[N_RND_UP]], [[TMP1]]
-; PREDICATED_EVL-NEXT: [[N_VEC:%.*]] = sub i32 [[N_RND_UP]], [[N_MOD_VF]]
-; PREDICATED_EVL-NEXT: [[TMP2:%.*]] = call i32 @llvm.vscale.i32()
-; PREDICATED_EVL-NEXT: [[TMP3:%.*]] = shl nuw i32 [[TMP2]], 4
-; PREDICATED_EVL-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 16 x i32> poison, i32 [[CONV]], i64 0
-; PREDICATED_EVL-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 16 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
-; PREDICATED_EVL-NEXT: [[TMP4:%.*]] = call <vscale x 16 x i32> @llvm.stepvector.nxv16i32()
-; PREDICATED_EVL-NEXT: br label [[VECTOR_BODY:%.*]]
-; PREDICATED_EVL: vector.body:
-; PREDICATED_EVL-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; PREDICATED_EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
-; PREDICATED_EVL-NEXT: [[VEC_IND:%.*]] = phi <vscale x 16 x i32> [ [[TMP4]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
-; PREDICATED_EVL-NEXT: [[AVL:%.*]] = sub i32 1024, [[EVL_BASED_IV]]
-; PREDICATED_EVL-NEXT: [[TMP5:%.*]] = call i32 @llvm.experimental.get.vector.length.i32(i32 [[AVL]], i32 16, i1 true)
-; PREDICATED_EVL-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 16 x i32> poison, i32 [[TMP5]], i64 0
-; PREDICATED_EVL-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <vscale x 16 x i32> [[BROADCAST_SPLATINSERT1]], <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
-; PREDICATED_EVL-NEXT: [[TMP6:%.*]] = icmp ugt <vscale x 16 x i32> [[VEC_IND]], [[BROADCAST_SPLAT]]
-; PREDICATED_EVL-NEXT: [[TMP7:%.*]] = shl nuw nsw <vscale x 16 x i32> [[VEC_IND]], splat (i32 1)
-; PREDICATED_EVL-NEXT: [[TMP8:%.*]] = zext nneg <vscale x 16 x i32> [[TMP7]] to <vscale x 16 x i64>
-; PREDICATED_EVL-NEXT: [[TMP9:%.*]] = getelementptr inbounds i8, ptr [[P]], <vscale x 16 x i64> [[TMP8]]
-; PREDICATED_EVL-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 16 x i8> @llvm.vp.gather.nxv16i8.nxv16p0(<vscale x 16 x ptr> align 1 [[TMP9]], <vscale x 16 x i1> [[TMP6]], i32 [[TMP5]])
-; PREDICATED_EVL-NEXT: [[TMP10:%.*]] = or disjoint <vscale x 16 x i32> [[TMP7]], splat (i32 1)
-; PREDICATED_EVL-NEXT: [[TMP11:%.*]] = zext nneg <vscale x 16 x i32> [[TMP10]] to <vscale x 16 x i64>
-; PREDICATED_EVL-NEXT: [[TMP12:%.*]] = getelementptr inbounds i8, ptr [[P]], <vscale x 16 x i64> [[TMP11]]
-; PREDICATED_EVL-NEXT: [[WIDE_MASKED_GATHER3:%.*]] = call <vscale x 16 x i8> @llvm.vp.gather.nxv16i8.nxv16p0(<vscale x 16 x ptr> align 1 [[TMP12]], <vscale x 16 x i1> [[TMP6]], i32 [[TMP5]])
-; PREDICATED_EVL-NEXT: [[TMP13:%.*]] = call <vscale x 16 x i8> @llvm.smax.nxv16i8(<vscale x 16 x i8> [[WIDE_MASKED_GATHER]], <vscale x 16 x i8> [[WIDE_MASKED_GATHER3]])
-; PREDICATED_EVL-NEXT: [[TMP14:%.*]] = zext nneg <vscale x 16 x i32> [[TMP7]] to <vscale x 16 x i64>
-; PREDICATED_EVL-NEXT: [[TMP15:%.*]] = getelementptr inbounds i8, ptr [[Q]], <vscale x 16 x i64> [[TMP14]]
-; PREDICATED_EVL-NEXT: call void @llvm.vp.scatter.nxv16i8.nxv16p0(<vscale x 16 x i8> [[TMP13]], <vscale x 16 x ptr> align 1 [[TMP15]], <vscale x 16 x i1> [[TMP6]], i32 [[TMP5]])
-; PREDICATED_EVL-NEXT: [[TMP16:%.*]] = sub <vscale x 16 x i8> zeroinitializer, [[TMP13]]
-; PREDICATED_EVL-NEXT: [[TMP17:%.*]] = zext nneg <vscale x 16 x i32> [[TMP10]] to <vscale x 16 x i64>
-; PREDICATED_EVL-NEXT: [[TMP18:%.*]] = getelementptr inbounds i8, ptr [[Q]], <vscale x 16 x i64> [[TMP17]]
-; PREDICATED_EVL-NEXT: call void @llvm.vp.scatter.nxv16i8.nxv16p0(<vscale x 16 x i8> [[TMP16]], <vscale x 16 x ptr> align 1 [[TMP18]], <vscale x 16 x i1> [[TMP6]], i32 [[TMP5]])
-; PREDICATED_EVL-NEXT: [[INDEX_EVL_NEXT]] = add nuw i32 [[TMP5]], [[EVL_BASED_IV]]
-; PREDICATED_EVL-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP3]]
-; PREDICATED_EVL-NEXT: [[VEC_IND_NEXT]] = add <vscale x 16 x i32> [[VEC_IND]], [[BROADCAST_SPLAT2]]
-; PREDICATED_EVL-NEXT: [[TMP19:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
-; PREDICATED_EVL-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
-; PREDICATED_EVL: middle.block:
-; PREDICATED_EVL-NEXT: br label [[FOR_END:%.*]]
-; PREDICATED_EVL: scalar.ph:
+; PREDICATED_DATA-WITH-EVL-LABEL: define void @masked_strided_factor2
+; PREDICATED_DATA-WITH-EVL-SAME: (ptr noalias readonly captures(none) [[P:%.*]], ptr noalias captures(none) [[Q:%.*]], i8 zeroext [[GUARD:%.*]]) #[[ATTR0:[0-9]+]] {
+; PREDICATED_DATA-WITH-EVL-NEXT: entry:
+; PREDICATED_DATA-WITH-EVL-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; PREDICATED_DATA-WITH-EVL: vector.ph:
+; PREDICATED_DATA-WITH-EVL-NEXT: [[CONV:%.*]] = zext i8 [[GUARD]] to i32
+; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP0:%.*]] = call i32 @llvm.vscale.i32()
+; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP1:%.*]] = shl nuw i32 [[TMP0]], 4
+; PREDICATED_DATA-WITH-EVL-NEXT: [[N_RND_UP:%.*]] = add i32 [[TMP1]], 1023
+; PREDICATED_DATA-WITH-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i32 [[N_RND_UP]], [[TMP1]]
+; PREDICATED_DATA-WITH-EVL-NEXT: [[N_VEC:%.*]] = sub i32 [[N_RND_UP]], [[N_MOD_VF]]
+; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP2:%.*]] = call i32 @llvm.vscale.i32()
+; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP3:%.*]] = shl nuw i32 [[TMP2]], 4
+; PREDICATED_DATA-WITH-EVL-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 16 x i32> poison, i32 [[CONV]], i64 0
+; PREDICATED_DATA-WITH-EVL-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 16 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
+; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP4:%.*]] = call <vscale x 16 x i32> @llvm.stepvector.nxv16i32()
+; PREDICATED_DATA-WITH-EVL-NEXT: br label [[VECTOR_BODY:%.*]]
+; PREDICATED_DATA-WITH-EVL: vector.body:
+; PREDICATED_DATA-WITH-EVL-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; PREDICATED_DATA-WITH-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; PREDICATED_DATA-WITH-EVL-NEXT: [[VEC_IND:%.*]] = phi <vscale x 16 x i32> [ [[TMP4]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
+; PREDICATED_DATA-WITH-EVL-NEXT: [[AVL:%.*]] = sub i32 1024, [[EVL_BASED_IV]]
+; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP5:%.*]] = call i32 @llvm.experimental.get.vector.length.i32(i32 [[AVL]], i32 16, i1 true)
+; PREDICATED_DATA-WITH-EVL-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 16 x i32> poison, i32 [[TMP5]], i64 0
+; PREDICATED_DATA-WITH-EVL-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <vscale x 16 x i32> [[BROADCAST_SPLATINSERT1]], <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
+; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP6:%.*]] = icmp ugt <vscale x 16 x i32> [[VEC_IND]], [[BROADCAST_SPLAT]]
+; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP7:%.*]] = shl nuw nsw <vscale x 16 x i32> [[VEC_IND]], splat (i32 1)
+; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP8:%.*]] = zext nneg <vscale x 16 x i32> [[TMP7]] to <vscale x 16 x i64>
+; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP9:%.*]] = getelementptr inbounds i8, ptr [[P]], <vscale x 16 x i64> [[TMP8]]
+; PREDICATED_DATA-WITH-EVL-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 16 x i8> @llvm.vp.gather.nxv16i8.nxv16p0(<vscale x 16 x ptr> align 1 [[TMP9]], <vscale x 16 x i1> [[TMP6]], i32 [[TMP5]])
+; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP10:%.*]] = or disjoint <vscale x 16 x i32> [[TMP7]], splat (i32 1)
+; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP11:%.*]] = zext nneg <vscale x 16 x i32> [[TMP10]] to <vscale x 16 x i64>
+; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP12:%.*]] = getelementptr inbounds i8, ptr [[P]], <vscale x 16 x i64> [[TMP11]]
+; PREDICATED_DATA-WITH-EVL-NEXT: [[WIDE_MASKED_GATHER3:%.*]] = call <vscale x 16 x i8> @llvm.vp.gather.nxv16i8.nxv16p0(<vscale x 16 x ptr> align 1 [[TMP12]], <vscale x 16 x i1> [[TMP6]], i32 [[TMP5]])
+; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP13:%.*]] = call <vscale x 16 x i8> @llvm.smax.nxv16i8(<vscale x 16 x i8> [[WIDE_MASKED_GATHER]], <vscale x 16 x i8> [[WIDE_MASKED_GATHER3]])
+; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP14:%.*]] = zext nneg <vscale x 16 x i32> [[TMP7]] to <vscale x 16 x i64>
+; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP15:%.*]] = getelementptr inbounds i8, ptr [[Q]], <vscale x 16 x i64> [[TMP14]]
+; PREDICATED_DATA-WITH-EVL-NEXT: call void @llvm.vp.scatter.nxv16i8.nxv16p0(<vscale x 16 x i8> [[TMP13]], <vscale x 16 x ptr> align 1 [[TMP15]], <vscale x 16 x i1> [[TMP6]], i32 [[TMP5]])
+; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP16:%.*]] = sub <vscale x 16 x i8> zeroinitializer, [[TMP13]]
+; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP17:%.*]] = zext nneg <vscale x 16 x i32> [[TMP10]] to <vscale x 16 x i64>
+; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP18:%.*]] = getelementptr inbounds i8, ptr [[Q]], <vscale x 16 x i64> [[TMP17]]
+; PREDICATED_DATA-WITH-EVL-NEXT: call void @llvm.vp.scatter.nxv16i8.nxv16p0(<vscale x 16 x i8> [[TMP16]], <vscale x 16 x ptr> align 1 [[TMP18]], <vscale x 16 x i1> [[TMP6]], i32 [[TMP5]])
+; PREDICATED_DATA-WITH-EVL-NEXT: [[INDEX_EVL_NEXT]] = add nuw i32 [[TMP5]], [[EVL_BASED_IV]]
+; PREDICATED_DATA-WITH-EVL-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP3]]
+; PREDICATED_DATA-WITH-EVL-NEXT: [[VEC_IND_NEXT]] = add <vscale x 16 x i32> [[VEC_IND]], [[BROADCAST_SPLAT2]]
+; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP19:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
+; PREDICATED_DATA-WITH-EVL-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; PREDICATED_DATA-WITH-EVL: middle.block:
+; PREDICATED_DATA-WITH-EVL-NEXT: br label [[FOR_END:%.*]]
+; PREDICATED_DATA-WITH-EVL: scalar.ph:
;
entry:
%conv = zext i8 %guard to i32
@@ -256,137 +256,137 @@ define void @masked_strided_factor4(ptr noalias nocapture readonly %p, ptr noali
; SCALAR_EPILOGUE-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
; SCALAR_EPILOGUE: scalar.ph:
;
-; PREDICATED_TAIL_FOLDING-LABEL: define void @masked_strided_factor4
-; PREDICATED_TAIL_FOLDING-SAME: (ptr noalias readonly captures(none) [[P:%.*]], ptr noalias captures(none) [[Q:%.*]], i8 zeroext [[GUARD:%.*]]) #[[ATTR0]] {
-; PREDICATED_TAIL_FOLDING-NEXT: entry:
-; PREDICATED_TAIL_FOLDING-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
-; PREDICATED_TAIL_FOLDING: vector.ph:
-; PREDICATED_TAIL_FOLDING-NEXT: [[CONV:%.*]] = zext i8 [[GUARD]] to i32
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP0:%.*]] = call i32 @llvm.vscale.i32()
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP1:%.*]] = shl nuw i32 [[TMP0]], 4
-; PREDICATED_TAIL_FOLDING-NEXT: [[N_RND_UP:%.*]] = add i32 [[TMP1]], 1023
-; PREDICATED_TAIL_FOLDING-NEXT: [[N_MOD_VF:%.*]] = urem i32 [[N_RND_UP]], [[TMP1]]
-; PREDICATED_TAIL_FOLDING-NEXT: [[N_VEC:%.*]] = sub i32 [[N_RND_UP]], [[N_MOD_VF]]
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP2:%.*]] = call i32 @llvm.vscale.i32()
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP3:%.*]] = shl nuw i32 [[TMP2]], 4
-; PREDICATED_TAIL_FOLDING-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 16 x i32> poison, i32 [[CONV]], i64 0
-; PREDICATED_TAIL_FOLDING-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 16 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP4:%.*]] = call <vscale x 16 x i32> @llvm.stepvector.nxv16i32()
-; PREDICATED_TAIL_FOLDING-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 16 x i32> poison, i32 [[TMP3]], i64 0
-; PREDICATED_TAIL_FOLDING-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <vscale x 16 x i32> [[BROADCAST_SPLATINSERT1]], <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
-; PREDICATED_TAIL_FOLDING-NEXT: br label [[VECTOR_BODY:%.*]]
-; PREDICATED_TAIL_FOLDING: vector.body:
-; PREDICATED_TAIL_FOLDING-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; PREDICATED_TAIL_FOLDING-NEXT: [[VEC_IND:%.*]] = phi <vscale x 16 x i32> [ [[TMP4]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
-; PREDICATED_TAIL_FOLDING-NEXT: [[ACTIVE_LANE_MASK:%.*]] = call <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i32(i32 [[INDEX]], i32 1024)
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP5:%.*]] = icmp ugt <vscale x 16 x i32> [[VEC_IND]], [[BROADCAST_SPLAT]]
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP6:%.*]] = select <vscale x 16 x i1> [[ACTIVE_LANE_MASK]], <vscale x 16 x i1> [[TMP5]], <vscale x 16 x i1> zeroinitializer
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP7:%.*]] = shl nuw nsw <vscale x 16 x i32> [[VEC_IND]], splat (i32 2)
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP8:%.*]] = or disjoint <vscale x 16 x i32> [[TMP7]], splat (i32 1)
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP9:%.*]] = or disjoint <vscale x 16 x i32> [[TMP7]], splat (i32 2)
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP10:%.*]] = or disjoint <vscale x 16 x i32> [[TMP7]], splat (i32 3)
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP11:%.*]] = zext nneg <vscale x 16 x i32> [[TMP7]] to <vscale x 16 x i64>
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP12:%.*]] = getelementptr inbounds i8, ptr [[P]], <vscale x 16 x i64> [[TMP11]]
-; PREDICATED_TAIL_FOLDING-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 16 x i8> @llvm.masked.gather.nxv16i8.nxv16p0(<vscale x 16 x ptr> [[TMP12]], i32 1, <vscale x 16 x i1> [[TMP6]], <vscale x 16 x i8> poison)
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP13:%.*]] = zext nneg <vscale x 16 x i32> [[TMP8]] to <vscale x 16 x i64>
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP14:%.*]] = getelementptr inbounds i8, ptr [[P]], <vscale x 16 x i64> [[TMP13]]
-; PREDICATED_TAIL_FOLDING-NEXT: [[WIDE_MASKED_GATHER3:%.*]] = call <vscale x 16 x i8> @llvm.masked.gather.nxv16i8.nxv16p0(<vscale x 16 x ptr> [[TMP14]], i32 1, <vscale x 16 x i1> [[TMP6]], <vscale x 16 x i8> poison)
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP15:%.*]] = zext nneg <vscale x 16 x i32> [[TMP9]] to <vscale x 16 x i64>
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP16:%.*]] = getelementptr inbounds i8, ptr [[P]], <vscale x 16 x i64> [[TMP15]]
-; PREDICATED_TAIL_FOLDING-NEXT: [[WIDE_MASKED_GATHER4:%.*]] = call <vscale x 16 x i8> @llvm.masked.gather.nxv16i8.nxv16p0(<vscale x 16 x ptr> [[TMP16]], i32 1, <vscale x 16 x i1> [[TMP6]], <vscale x 16 x i8> poison)
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP17:%.*]] = zext nneg <vscale x 16 x i32> [[TMP10]] to <vscale x 16 x i64>
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP18:%.*]] = getelementptr inbounds i8, ptr [[P]], <vscale x 16 x i64> [[TMP17]]
-; PREDICATED_TAIL_FOLDING-NEXT: [[WIDE_MASKED_GATHER5:%.*]] = call <vscale x 16 x i8> @llvm.masked.gather.nxv16i8.nxv16p0(<vscale x 16 x ptr> [[TMP18]], i32 1, <vscale x 16 x i1> [[TMP6]], <vscale x 16 x i8> poison)
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP19:%.*]] = call <vscale x 16 x i8> @llvm.smax.nxv16i8(<vscale x 16 x i8> [[WIDE_MASKED_GATHER]], <vscale x 16 x i8> [[WIDE_MASKED_GATHER3]])
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP20:%.*]] = sub <vscale x 16 x i8> zeroinitializer, [[TMP19]]
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP21:%.*]] = call <vscale x 16 x i8> @llvm.smax.nxv16i8(<vscale x 16 x i8> [[WIDE_MASKED_GATHER4]], <vscale x 16 x i8> [[WIDE_MASKED_GATHER5]])
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP22:%.*]] = sub <vscale x 16 x i8> zeroinitializer, [[TMP21]]
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP23:%.*]] = zext nneg <vscale x 16 x i32> [[TMP7]] to <vscale x 16 x i64>
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP24:%.*]] = getelementptr inbounds i8, ptr [[Q]], <vscale x 16 x i64> [[TMP23]]
-; PREDICATED_TAIL_FOLDING-NEXT: call void @llvm.masked.scatter.nxv16i8.nxv16p0(<vscale x 16 x i8> [[TMP19]], <vscale x 16 x ptr> [[TMP24]], i32 1, <vscale x 16 x i1> [[TMP6]])
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP25:%.*]] = zext nneg <vscale x 16 x i32> [[TMP8]] to <vscale x 16 x i64>
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP26:%.*]] = getelementptr inbounds i8, ptr [[Q]], <vscale x 16 x i64> [[TMP25]]
-; PREDICATED_TAIL_FOLDING-NEXT: call void @llvm.masked.scatter.nxv16i8.nxv16p0(<vscale x 16 x i8> [[TMP20]], <vscale x 16 x ptr> [[TMP26]], i32 1, <vscale x 16 x i1> [[TMP6]])
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP27:%.*]] = zext nneg <vscale x 16 x i32> [[TMP9]] to <vscale x 16 x i64>
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP28:%.*]] = getelementptr inbounds i8, ptr [[Q]], <vscale x 16 x i64> [[TMP27]]
-; PREDICATED_TAIL_FOLDING-NEXT: call void @llvm.masked.scatter.nxv16i8.nxv16p0(<vscale x 16 x i8> [[TMP21]], <vscale x 16 x ptr> [[TMP28]], i32 1, <vscale x 16 x i1> [[TMP6]])
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP29:%.*]] = zext nneg <vscale x 16 x i32> [[TMP10]] to <vscale x 16 x i64>
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP30:%.*]] = getelementptr inbounds i8, ptr [[Q]], <vscale x 16 x i64> [[TMP29]]
-; PREDICATED_TAIL_FOLDING-NEXT: call void @llvm.masked.scatter.nxv16i8.nxv16p0(<vscale x 16 x i8> [[TMP22]], <vscale x 16 x ptr> [[TMP30]], i32 1, <vscale x 16 x i1> [[TMP6]])
-; PREDICATED_TAIL_FOLDING-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP3]]
-; PREDICATED_TAIL_FOLDING-NEXT: [[VEC_IND_NEXT]] = add <vscale x 16 x i32> [[VEC_IND]], [[BROADCAST_SPLAT2]]
-; PREDICATED_TAIL_FOLDING-NEXT: [[TMP31:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
-; PREDICATED_TAIL_FOLDING-NEXT: br i1 [[TMP31]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
-; PREDICATED_TAIL_FOLDING: middle.block:
-; PREDICATED_TAIL_FOLDING-NEXT: br label [[FOR_END:%.*]]
-; PREDICATED_TAIL_FOLDING: scalar.ph:
+; PREDICATED_DATA-LABEL: define void @masked_strided_factor4
+; PREDICATED_DATA-SAME: (ptr noalias readonly captures(none) [[P:%.*]], ptr noalias captures(none) [[Q:%.*]], i8 zeroext [[GUARD:%.*]]) #[[ATTR0]] {
+; PREDICATED_DATA-NEXT: entry:
+; PREDICATED_DATA-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; PREDICATED_DATA: vector.ph:
+; PREDICATED_DATA-NEXT: [[CONV:%.*]] = zext i8 [[GUARD]] to i32
+; PREDICATED_DATA-NEXT: [[TMP0:%.*]] = call i32 @llvm.vscale.i32()
+; PREDICATED_DATA-NEXT: [[TMP1:%.*]] = shl nuw i32 [[TMP0]], 4
+; PREDICATED_DATA-NEXT: [[N_RND_UP:%.*]] = add i32 [[TMP1]], 1023
+; PREDICATED_DATA-NEXT: [[N_MOD_VF:%.*]] = urem i32 [[N_RND_UP]], [[TMP1]]
+; PREDICATED_DATA-NEXT: [[N_VEC:%.*]] = sub i32 [[N_RND_UP]], [[N_MOD_VF]]
+; PREDICATED_DATA-NEXT: [[TMP2:%.*]] = call i32 @llvm.vscale.i32()
+; PREDICATED_DATA-NEXT: [[TMP3:%.*]] = shl nuw i32 [[TMP2]], 4
+; PREDICATED_DATA-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 16 x i32> poison, i32 [[CONV]], i64 0
+; PREDICATED_DATA-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 16 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
+; PREDICATED_DATA-NEXT: [[TMP4:%.*]] = call <vscale x 16 x i32> @llvm.stepvector.nxv16i32()
+; PREDICATED_DATA-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 16 x i32> poison, i32 [[TMP3]], i64 0
+; PREDICATED_DATA-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <vscale x 16 x i32> [[BROADCAST_SPLATINSERT1]], <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
+; PREDICATED_DATA-NEXT: br label [[VECTOR_BODY:%.*]]
+; PREDICATED_DATA: vector.body:
+; PREDICATED_DATA-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; PREDICATED_DATA-NEXT: [[VEC_IND:%.*]] = phi <vscale x 16 x i32> [ [[TMP4]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
+; PREDICATED_DATA-NEXT: [[ACTIVE_LANE_MASK:%.*]] = call <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i32(i32 [[INDEX]], i32 1024)
+; PREDICATED_DATA-NEXT: [[TMP5:%.*]] = icmp ugt <vscale x 16 x i32> [[VEC_IND]], [[BROADCAST_SPLAT]]
+; PREDICATED_DATA-NEXT: [[TMP6:%.*]] = select <vscale x 16 x i1> [[ACTIVE_LANE_MASK]], <vscale x 16 x i1> [[TMP5]], <vscale x 16 x i1> zeroinitializer
+; PREDICATED_DATA-NEXT: [[TMP7:%.*]] = shl nuw nsw <vscale x 16 x i32> [[VEC_IND]], splat (i32 2)
+; PREDICATED_DATA-NEXT: [[TMP8:%.*]] = or disjoint <vscale x 16 x i32> [[TMP7]], splat (i32 1)
+; PREDICATED_DATA-NEXT: [[TMP9:%.*]] = or disjoint <vscale x 16 x i32> [[TMP7]], splat (i32 2)
+; PREDICATED_DATA-NEXT: [[TMP10:%.*]] = or disjoint <vscale x 16 x i32> [[TMP7]], splat (i32 3)
+; PREDICATED_DATA-NEXT: [[TMP11:%.*]] = zext nneg <vscale x 16 x i32> [[TMP7]] to <vscale x 16 x i64>
+; PREDICATED_DATA-NEXT: [[TMP12:%.*]] = getelementptr inbounds i8, ptr [[P]], <vscale x 16 x i64> [[TMP11]]
+; PREDICATED_DATA-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 16 x i8> @llvm.masked.gather.nxv16i8.nxv16p0(<vscale x 16 x ptr> [[TMP12]], i32 1, <vscale x 16 x i1> [[TMP6]], <vscale x 16 x i8> poison)
+; PREDICATED_DATA-NEXT: [[TMP13:%.*]] = zext nneg <vscale x 16 x i32> [[TMP8]] to <vscale x 16 x i64>
+; PREDICATED_DATA-NEXT: [[TMP14:%.*]] = getelementptr inbounds i8, ptr [[P]], <vscale x 16 x i64> [[TMP13]]
+; PREDICATED_DATA-NEXT: [[WIDE_MASKED_GATHER3:%.*]] = call <vscale x 16 x i8> @llvm.masked.gather.nxv16i8.nxv16p0(<vscale x 16 x ptr> [[TMP14]], i32 1, <vscale x 16 x i1> [[TMP6]], <vscale x 16 x i8> poison)
+; PREDICATED_DATA-NEXT: [[TMP15:%.*]] = zext nneg <vscale x 16 x i32> [[TMP9]] to <vscale x 16 x i64>
+; PREDICATED_DATA-NEXT: [[TMP16:%.*]] = getelementptr inbounds i8, ptr [[P]], <vscale x 16 x i64> [[TMP15]]
+; PREDICATED_DATA-NEXT: [[WIDE_MASKED_GATHER4:%.*]] = call <vscale x 16 x i8> @llvm.masked.gather.nxv16i8.nxv16p0(<vscale x 16 x ptr> [[TMP16]], i32 1, <vscale x 16 x i1> [[TMP6]], <vscale x 16 x i8> poison)
+; PREDICATED_DATA-NEXT: [[TMP17:%.*]] = zext nneg <vscale x 16 x i32> [[TMP10]] to <vscale x 16 x i64>
+; PREDICATED_DATA-NEXT: [[TMP18:%.*]] = getelementptr inbounds i8, ptr [[P]], <vscale x 16 x i64> [[TMP17]]
+; PREDICATED_DATA-NEXT: [[WIDE_MASKED_GATHER5:%.*]] = call <vscale x 16 x i8> @llvm.masked.gather.nxv16i8.nxv16p0(<vscale x 16 x ptr> [[TMP18]], i32 1, <vscale x 16 x i1> [[TMP6]], <vscale x 16 x i8> poison)
+; PREDICATED_DATA-NEXT: [[TMP19:%.*]] = call <vscale x 16 x i8> @llvm.smax.nxv16i8(<vscale x 16 x i8> [[WIDE_MASKED_GATHER]], <vscale x 16 x i8> [[WIDE_MASKED_GATHER3]])
+; PREDICATED_DATA-NEXT: [[TMP20:%.*]] = sub <vscale x 16 x i8> zeroinitializer, [[TMP19]]
+; PREDICATED_DATA-NEXT: [[TMP21:%.*]] = call <vscale x 16 x i8> @llvm.smax.nxv16i8(<vscale x 16 x i8> [[WIDE_MASKED_GATHER4]], <vscale x 16 x i8> [[WIDE_MASKED_GATHER5]])
+; PREDICATED_DATA-NEXT: [[TMP22:%.*]] = sub <vscale x 16 x i8> zeroinitializer, [[TMP21]]
+; PREDICATED_DATA-NEXT: [[TMP23:%.*]] = zext nneg <vscale x 16 x i32> [[TMP7]] to <vscale x 16 x i64>
+; PREDICATED_DATA-NEXT: [[TMP24:%.*]] = getelementptr inbounds i8, ptr [[Q]], <vscale x 16 x i64> [[TMP23]]
+; PREDICATED_DATA-NEXT: call void @llvm.masked.scatter.nxv16i8.nxv16p0(<vscale x 16 x i8> [[TMP19]], <vscale x 16 x ptr> [[TMP24]], i32 1, <vscale x 16 x i1> [[TMP6]])
+; PREDICATED_DATA-NEXT: [[TMP25:%.*]] = zext nneg <vscale x 16 x i32> [[TMP8]] to <vscale x 16 x i64>
+; PREDICATED_DATA-NEXT: [[TMP26:%.*]] = getelementptr inbounds i8, ptr [[Q]], <vscale x 16 x i64> [[TMP25]]
+; PREDICATED_DATA-NEXT: call void @llvm.masked.scatter.nxv16i8.nxv16p0(<vscale x 16 x i8> [[TMP20]], <vscale x 16 x ptr> [[TMP26]], i32 1, <vscale x 16 x i1> [[TMP6]])
+; PREDICATED_DATA-NEXT: [[TMP27:%.*]] = zext nneg <vscale x 16 x i32> [[TMP9]] to <vscale x 16 x i64>
+; PREDICATED_DATA-NEXT: [[TMP28:%.*]] = getelementptr inbounds i8, ptr [[Q]], <vscale x 16 x i64> [[TMP27]]
+; PREDICATED_DATA-NEXT: call void @llvm.masked.scatter.nxv16i8.nxv16p0(<vscale x 16 x i8> [[TMP21]], <vscale x 16 x ptr> [[TMP28]], i32 1, <vscale x 16 x i1> [[TMP6]])
+; PREDICATED_DATA-NEXT: [[TMP29:%.*]] = zext nneg <vscale x 16 x i32> [[TMP10]] to <vscale x 16 x i64>
+; PREDICATED_DATA-NEXT: [[TMP30:%.*]] = getelementptr inbounds i8, ptr [[Q]], <vscale x 16 x i64> [[TMP29]]
+; PREDICATED_DATA-NEXT: call void @llvm.masked.scatter.nxv16i8.nxv16p0(<vscale x 16 x i8> [[TMP22]], <vscale x 16 x ptr> [[TMP30]], i32 1, <vscale x 16 x i1> [[TMP6]])
+; PREDICATED_DATA-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP3]]
+; PREDICATED_DATA-NEXT: [[VEC_IND_NEXT]] = add <vscale x 16 x i32> [[VEC_IND]], [[BROADCAST_SPLAT2]]
+; PREDICATED_DATA-NEXT: [[TMP31:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
+; PREDICATED_DATA-NEXT: br i1 [[TMP31]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; PREDICATED_DATA: middle.block:
+; PREDICATED_DATA-NEXT: br label [[FOR_END:%.*]]
+; PREDICATED_DATA: scalar.ph:
;
-; PREDICATED_EVL-LABEL: define void @masked_strided_factor4
-; PREDICATED_EVL-SAME: (ptr noalias readonly captures(none) [[P:%.*]], ptr noalias captures(none) [[Q:%.*]], i8 zeroext [[GUARD:%.*]]) #[[ATTR0]] {
-; PREDICATED_EVL-NEXT: entry:
-; PREDICATED_EVL-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
-; PREDICATED_EVL: vector.ph:
-; PREDICATED_EVL-NEXT: [[CONV:%.*]] = zext i8 [[GUARD]] to i32
-; PREDICATED_EVL-NEXT: [[TMP0:%.*]] = call i32 @llvm.vscale.i32()
-; PREDICATED_EVL-NEXT: [[TMP1:%.*]] = shl nuw i32 [[TMP0]], 4
-; PREDICATED_EVL-NEXT: [[N_RND_UP:%.*]] = add i32 [[TMP1]], 1023
-; PREDICATED_EVL-NEXT: [[N_MOD_VF:%.*]] = urem i32 [[N_RND_UP]], [[TMP1]]
-; PREDICATED_EVL-NEXT: [[N_VEC:%.*]] = sub i32 [[N_RND_UP]], [[N_MOD_VF]]
-; PREDICATED_EVL-NEXT: [[TMP2:%.*]] = call i32 @llvm.vscale.i32()
-; PREDICATED_EVL-NEXT: [[TMP3:%.*]] = shl nuw i32 [[TMP2]], 4
-; PREDICATED_EVL-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 16 x i32> poison, i32 [[CONV]], i64 0
-; PREDICATED_EVL-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 16 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
-; PREDICATED_EVL-NEXT: [[TMP4:%.*]] = call <vscale x 16 x i32> @llvm.stepvector.nxv16i32()
-; PREDICATED_EVL-NEXT: br label [[VECTOR_BODY:%.*]]
-; PREDICATED_EVL: vector.body:
-; PREDICATED_EVL-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; PREDICATED_EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
-; PREDICATED_EVL-NEXT: [[VEC_IND:%.*]] = phi <vscale x 16 x i32> [ [[TMP4]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
-; PREDICATED_EVL-NEXT: [[AVL:%.*]] = sub i32 1024, [[EVL_BASED_IV]]
-; PREDICATED_EVL-NEXT: [[TMP5:%.*]] = call i32 @llvm.experimental.get.vector.length.i32(i32 [[AVL]], i32 16, i1 true)
-; PREDICATED_EVL-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 16 x i32> poison, i32 [[TMP5]], i64 0
-; PREDICATED_EVL-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <vscale x 16 x i32> [[BROADCAST_SPLATINSERT1]], <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
-; PREDICATED_EVL-NEXT: [[TMP6:%.*]] = icmp ugt <vscale x 16 x i32> [[VEC_IND]], [[BROADCAST_SPLAT]]
-; PREDICATED_EVL-NEXT: [[TMP7:%.*]] = shl nuw nsw <vscale x 16 x i32> [[VEC_IND]], splat (i32 2)
-; PREDICATED_EVL-NEXT: [[TMP8:%.*]] = or disjoint <vscale x 16 x i32> [[TMP7]], splat (i32 1)
-; PREDICATED_EVL-NEXT: [[TMP9:%.*]] = or disjoint <vscale x 16 x i32> [[TMP7]], splat (i32 2)
-; PREDICATED_EVL-NEXT: [[TMP10:%.*]] = or disjoint <vscale x 16 x i32> [[TMP7]], splat (i32 3)
-; PREDICATED_EVL-NEXT: [[TMP11:%.*]] = zext nneg <vscale x 16 x i32> [[TMP7]] to <vscale x 16 x i64>
-; PREDICATED_EVL-NEXT: [[TMP12:%.*]] = getelementptr inbounds i8, ptr [[P]], <vscale x 16 x i64> [[TMP11]]
-; PREDICATED_EVL-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 16 x i8> @llvm.vp.gather.nxv16i8.nxv16p0(<vscale x 16 x ptr> align 1 [[TMP12]], <vscale x 16 x i1> [[TMP6]], i32 [[TMP5]])
-; PREDICATED_EVL-NEXT: [[TMP13:%.*]] = zext nneg <vscale x 16 x i32> [[TMP8]] to <vscale x 16 x i64>
-; PREDICATED_EVL-NEXT: [[TMP14:%.*]] = getelementptr inbounds i8, ptr [[P]], <vscale x 16 x i64> [[TMP13]]
-; PREDICATED_EVL-NEXT: [[WIDE_MASKED_GATHER3:%.*]] = call <vscale x 16 x i8> @llvm.vp.gather.nxv16i8.nxv16p0(<vscale x 16 x ptr> align 1 [[TMP14]], <vscale x 16 x i1> [[TMP6]], i32 [[TMP5]])
-; PREDICATED_EVL-NEXT: [[TMP15:%.*]] = zext nneg <vscale x 16 x i32> [[TMP9]] to <vscale x 16 x i64>
-; PREDICATED_EVL-NEXT: [[TMP16:%.*]] = getelementptr inbounds i8, ptr [[P]], <vscale x 16 x i64> [[TMP15]]
-; PREDICATED_EVL-NEXT: [[WIDE_MASKED_GATHER4:%.*]] = call <vscale x 16 x i8> @llvm.vp.gather.nxv16i8.nxv16p0(<vscale x 16 x ptr> align 1 [[TMP16]], <vscale x 16 x i1> [[TMP6]], i32 [[TMP5]])
-; PREDICATED_EVL-NEXT: [[TMP17:%.*]] = zext nneg <vscale x 16 x i32> [[TMP10]] to <vscale x 16 x i64>
-; PREDICATED_EVL-NEXT: [[TMP18:%.*]] = getelementptr inbounds i8, ptr [[P]], <vscale x 16 x i64> [[TMP17]]
-; PREDICATED_EVL-NEXT: [[WIDE_MASKED_GATHER5:%.*]] = call <vscale x 16 x i8> @llvm.vp.gather.nxv16i8.nxv16p0(<vscale x 16 x ptr> align 1 [[TMP18]], <vscale x 16 x i1> [[TMP6]], i32 [[TMP5]])
-; PREDICATED_EVL-NEXT: [[TMP19:%.*]] = call <vscale x 16 x i8> @llvm.smax.nxv16i8(<vscale x 16 x i8> [[WIDE_MASKED_GATHER]], <vscale x 16 x i8> [[WIDE_MASKED_GATHER3]])
-; PREDICATED_EVL-NEXT: [[TMP20:%.*]] = sub <vscale x 16 x i8> zeroinitializer, [[TMP19]]
-; PREDICATED_EVL-NEXT: [[TMP21:%.*]] = call <vscale x 16 x i8> @llvm.smax.nxv16i8(<vscale x 16 x i8> [[WIDE_MASKED_GATHER4]], <vscale x 16 x i8> [[WIDE_MASKED_GATHER5]])
-; PREDICATED_EVL-NEXT: [[TMP22:%.*]] = sub <vscale x 16 x i8> zeroinitializer, [[TMP21]]
-; PREDICATED_EVL-NEXT: [[TMP23:%.*]] = zext nneg <vscale x 16 x i32> [[TMP7]] to <vscale x 16 x i64>
-; PREDICATED_EVL-NEXT: [[TMP24:%.*]] = getelementptr inbounds i8, ptr [[Q]], <vscale x 16 x i64> [[TMP23]]
-; PREDICATED_EVL-NEXT: call void @llvm.vp.scatter.nxv16i8.nxv16p0(<vscale x 16 x i8> [[TMP19]], <vscale x 16 x ptr> align 1 [[TMP24]], <vscale x 16 x i1> [[TMP6]], i32 [[TMP5]])
-; PREDICATED_EVL-NEXT: [[TMP25:%.*]] = zext nneg <vscale x 16 x i32> [[TMP8]] to <vscale x 16 x i64>
-; PREDICATED_EVL-NEXT: [[TMP26:%.*]] = getelementptr inbounds i8, ptr [[Q]], <vscale x 16 x i64> [[TMP25]]
-; PREDICATED_EVL-NEXT: call void @llvm.vp.scatter.nxv16i8.nxv16p0(<vscale x 16 x i8> [[TMP20]], <vscale x 16 x ptr> align 1 [[TMP26]], <vscale x 16 x i1> [[TMP6]], i32 [[TMP5]])
-; PREDICATED_EVL-NEXT: [[TMP27:%.*]] = zext nneg <vscale x 16 x i32> [[TMP9]] to <vscale x 16 x i64>
-; PREDICATED_EVL-NEXT: [[TMP28:%.*]] = getelementptr inbounds i8, ptr [[Q]], <vscale x 16 x i64> [[TMP27]]
-; PREDICATED_EVL-NEXT: call void @llvm.vp.scatter.nxv16i8.nxv16p0(<vscale x 16 x i8> [[TMP21]], <vscale x 16 x ptr> align 1 [[TMP28]], <vscale x 16 x i1> [[TMP6]], i32 [[TMP5]])
-; PREDICATED_EVL-NEXT: [[TMP29:%.*]] = zext nneg <vscale x 16 x i32> [[TMP10]] to <vscale x 16 x i64>
-; PREDICATED_EVL-NEXT: [[TMP30:%.*]] = getelementptr inbounds i8, ptr [[Q]], <vscale x 16 x i64> [[TMP29]]
-; PREDICATED_EVL-NEXT: call void @llvm.vp.scatter.nxv16i8.nxv16p0(<vscale x 16 x i8> [[TMP22]], <vscale x 16 x ptr> align 1 [[TMP30]], <vscale x 16 x i1> [[TMP6]], i32 [[TMP5]])
-; PREDICATED_EVL-NEXT: [[INDEX_EVL_NEXT]] = add nuw i32 [[TMP5]], [[EVL_BASED_IV]]
-; PREDICATED_EVL-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP3]]
-; PREDICATED_EVL-NEXT: [[VEC_IND_NEXT]] = add <vscale x 16 x i32> [[VEC_IND]], [[BROADCAST_SPLAT2]]
-; PREDICATED_EVL-NEXT: [[TMP31:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
-; PREDICATED_EVL-NEXT: br i1 [[TMP31]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
-; PREDICATED_EVL: middle.block:
-; PREDICATED_EVL-NEXT: br label [[FOR_END:%.*]]
-; PREDICATED_EVL: scalar.ph:
+; PREDICATED_DATA-WITH-EVL-LABEL: define void @masked_strided_factor4
+; PREDICATED_DATA-WITH-EVL-SAME: (ptr noalias readonly captures(none) [[P:%.*]], ptr noalias captures(none) [[Q:%.*]], i8 zeroext [[GUARD:%.*]]) #[[ATTR0]] {
+; PREDICATED_DATA-WITH-EVL-NEXT: entry:
+; PREDICATED_DATA-WITH-EVL-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; PREDICATED_DATA-WITH-EVL: vector.ph:
+; PREDICATED_DATA-WITH-EVL-NEXT: [[CONV:%.*]] = zext i8 [[GUARD]] to i32
+; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP0:%.*]] = call i32 @llvm.vscale.i32()
+; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP1:%.*]] = shl nuw i32 [[TMP0]], 4
+; PREDICATED_DATA-WITH-EVL-NEXT: [[N_RND_UP:%.*]] = add i32 [[TMP1]], 1023
+; PREDICATED_DATA-WITH-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i32 [[N_RND_UP]], [[TMP1]]
+; PREDICATED_DATA-WITH-EVL-NEXT: [[N_VEC:%.*]] = sub i32 [[N_RND_UP]], [[N_MOD_VF]]
+; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP2:%.*]] = call i32 @llvm.vscale.i32()
+; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP3:%.*]] = shl nuw i32 [[TMP2]], 4
+; PREDICATED_DATA-WITH-EVL-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 16 x i32> poison, i32 [[CONV]], i64 0
+; PREDICATED_DATA-WITH-EVL-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 16 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
+; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP4:%.*]] = call <vscale x 16 x i32> @llvm.stepvector.nxv16i32()
+; PREDICATED_DATA-WITH-EVL-NEXT: br label [[VECTOR_BODY:%.*]]
+; PREDICATED_DATA-WITH-EVL: vector.body:
+; PREDICATED_DATA-WITH-EVL-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; PREDICATED_DATA-WITH-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; PREDICATED_DATA-WITH-EVL-NEXT: [[VEC_IND:%.*]] = phi <vscale x 16 x i32> [ [[TMP4]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
+; PREDICATED_DATA-WITH-EVL-NEXT: [[AVL:%.*]] = sub i32 1024, [[EVL_BASED_IV]]
+; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP5:%.*]] = call i32 @llvm.experimental.get.vector.length.i32(i32 [[AVL]], i32 16, i1 true)
+; PREDICATED_DATA-WITH-EVL-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 16 x i32> poison, i32 [[TMP5]], i64 0
+; PREDICATED_DATA-WITH-EVL-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <vscale x 16 x i32> [[BROADCAST_SPLATINSERT1]], <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
+; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP6:%.*]] = icmp ugt <vscale x 16 x i32> [[VEC_IND]], [[BROADCAST_SPLAT]]
+; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP7:%.*]] = shl nuw nsw <vscale x 16 x i32> [[VEC_IND]], splat (i32 2)
+; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP8:%.*]] = or disjoint <vscale x 16 x i32> [[TMP7]], splat (i32 1)
+; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP9:%.*]] = or disjoint <vscale x 16 x i32> [[TMP7]], splat (i32 2)
+; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP10:%.*]] = or disjoint <vscale x 16 x i32> [[TMP7]], splat (i32 3)
+; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP11:%.*]] = zext nneg <vscale x 16 x i32> [[TMP7]] to <vscale x 16 x i64>
+; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP12:%.*]] = getelementptr inbounds i8, ptr [[P]], <vscale x 16 x i64> [[TMP11]]
+; PREDICATED_DATA-WITH-EVL-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 16 x i8> @llvm.vp.gather.nxv16i8.nxv16p0(<vscale x 16 x ptr> align 1 [[TMP12]], <vscale x 16 x i1> [[TMP6]], i32 [[TMP5]])
+; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP13:%.*]] = zext nneg <vscale x 16 x i32> [[TMP8]] to <vscale x 16 x i64>
+; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP14:%.*]] = getelementptr inbounds i8, ptr [[P]], <vscale x 16 x i64> [[TMP13]]
+; PREDICATED_DATA-WITH-EVL-NEXT: [[WIDE_MASKED_GATHER3:%.*]] = call <vscale x 16 x i8> @llvm.vp.gather.nxv16i8.nxv16p0(<vscale x 16 x ptr> align 1 [[TMP14]], <vscale x 16 x i1> [[TMP6]], i32 [[TMP5]])
+; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP15:%.*]] = zext nneg <vscale x 16 x i32> [[TMP9]] to <vscale x 16 x i64>
+; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP16:%.*]] = getelementptr inbounds i8, ptr [[P]], <vscale x 16 x i64> [[TMP15]]
+; PREDICATED_DATA-WITH-EVL-NEXT: [[WIDE_MASKED_GATHER4:%.*]] = call <vscale x 16 x i8> @llvm.vp.gather.nxv16i8.nxv16p0(<vscale x 16 x ptr> align 1 [[TMP16]], <vscale x 16 x i1> [[TMP6]], i32 [[TMP5]])
+; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP17:%.*]] = zext nneg <vscale x 16 x i32> [[TMP10]] to <vscale x 16 x i64>
+; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP18:%.*]] = getelementptr inbounds i8, ptr [[P]], <vscale x 16 x i64> [[TMP17]]
+; PREDICATED_DATA-WITH-EVL-NEXT: [[WIDE_MASKED_GATHER5:%.*]] = call <vscale x 16 x i8> @llvm.vp.gather.nxv16i8.nxv16p0(<vscale x 16 x ptr> align 1 [[TMP18]], <vscale x 16 x i1> [[TMP6]], i32 [[TMP5]])
+; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP19:%.*]] = call <vscale x 16 x i8> @llvm.smax.nxv16i8(<vscale x 16 x i8> [[WIDE_MASKED_GATHER]], <vscale x 16 x i8> [[WIDE_MASKED_GATHER3]])
+; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP20:%.*]] = sub <vscale x 16 x i8> zeroinitializer, [[TMP19]]
+; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP21:%.*]] = call <vscale x 16 x i8> @llvm.smax.nxv16i8(<vscale x 16 x i8> [[WIDE_MASKED_GATHER4]], <vscale x 16 x i8> [[WIDE_MASKED_GATHER5]])
+; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP22:%.*]] = sub <vscale x 16 x i8> zeroinitializer, [[TMP21]]
+; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP23:%.*]] = zext nneg <vscale x 16 x i32> [[TMP7]] to <vscale x 16 x i64>
+; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP24:%.*]] = getelementptr inbounds i8, ptr [[Q]], <vscale x 16 x i64> [[TMP23]]
+; PREDICATED_DATA-WITH-EVL-NEXT: call void @llvm.vp.scatter.nxv16i8.nxv16p0(<vscale x 16 x i8> [[TMP19]], <vscale x 16 x ptr> align 1 [[TMP24]], <vscale x 16 x i1> [[TMP6]], i32 [[TMP5]])
+; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP25:%.*]] = zext nneg <vscale x 16 x i32> [[TMP8]] to <vscale x 16 x i64>
+; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP26:%.*]] = getelementptr inbounds i8, ptr [[Q]], <vscale x 16 x i64> [[TMP25]]
+; PREDICATED_DATA-WITH-EVL-NEXT: call void @llvm.vp.scatter.nxv16i8.nxv16p0(<vscale x 16 x i8> [[TMP20]], <vscale x 16 x ptr> align 1 [[TMP26]], <vscale x 16 x i1> [[TMP6]], i32 [[TMP5]])
+; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP27:%.*]] = zext nneg <vscale x 16 x i32> [[TMP9]] to <vscale x 16 x i64>
+; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP28:%.*]] = getelementptr inbounds i8, ptr [[Q]], <vscale x 16 x i64> [[TMP27]]
+; PREDICATED_DATA-WITH-EVL-NEXT: call void @llvm.vp.scatter.nxv16i8.nxv16p0(<vscale x 16 x i8> [[TMP21]], <vscale x 16 x ptr> align 1 [[TMP28]], <vscale x 16 x i1> [[TMP6]], i32 [[TMP5]])
+; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP29:%.*]] = zext nneg <vscale x 16 x i32> [[TMP10]] to <vscale x 16 x i64>
+; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP30:%.*]] = getelementptr inbounds i8, ptr [[Q]], <vscale x 16 x i64> [[TMP29]]
+; PREDICATED_DATA-WITH-EVL-NEXT: call void @llvm.vp.scatter.nxv16i8.nxv16p0(<vscale x 16 x i8> [[TMP22]], <vscale x 16 x ptr> align 1 [[TMP30]], <vscale x 16 x i1> [[TMP6]], i32 [[TMP5]])
+; PREDICATED_DATA-WITH-EVL-NEXT: [[INDEX_EVL_NEXT]] = add nuw i32 [[TMP5]], [[EVL_BASED_IV]]
+; PREDICATED_DATA-WITH-EVL-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP3]]
+; PREDICATED_DATA-WITH-EVL-NEXT: [[VEC_IND_NEXT]] = add <vscale x 16 x i32> [[VEC_IND]], [[BROADCAST_SPLAT2]]
+; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP31:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
+; PREDICATED_DATA-WITH-EVL-NEXT: br i1 [[TMP31]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
+; PREDICATED_DATA-WITH-EVL: middle.block:
+; PREDICATED_DATA-WITH-EVL-NEXT: br label [[FOR_END:%.*]]
+; PREDICATED_DATA-WITH-EVL: scalar.ph:
;
entry:
%conv = zext i8 %guard to i32
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/low-trip-count.ll b/llvm/test/Transforms/LoopVectorize/RISCV/low-trip-count.ll
index 6c57d2f..e2641ab 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/low-trip-count.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/low-trip-count.ll
@@ -133,15 +133,15 @@ define void @trip8_i8(ptr noalias nocapture noundef %dst, ptr noalias nocapture
; CHECK-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
-; CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 0, i64 8)
+; CHECK-NEXT: [[TMP5:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 8, i32 4, i1 true)
; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds i8, ptr [[TMP8:%.*]], i32 0
-; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 4 x i8> @llvm.masked.load.nxv4i8.p0(ptr [[TMP9]], i32 1, <vscale x 4 x i1> [[ACTIVE_LANE_MASK]], <vscale x 4 x i8> poison)
+; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 4 x i8> @llvm.vp.load.nxv4i8.p0(ptr align 1 [[TMP9]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP5]])
; CHECK-NEXT: [[TMP10:%.*]] = shl <vscale x 4 x i8> [[WIDE_MASKED_LOAD]], splat (i8 1)
; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds i8, ptr [[TMP11:%.*]], i32 0
-; CHECK-NEXT: [[WIDE_MASKED_LOAD1:%.*]] = call <vscale x 4 x i8> @llvm.masked.load.nxv4i8.p0(ptr [[TMP12]], i32 1, <vscale x 4 x i1> [[ACTIVE_LANE_MASK]], <vscale x 4 x i8> poison)
+; CHECK-NEXT: [[WIDE_MASKED_LOAD1:%.*]] = call <vscale x 4 x i8> @llvm.vp.load.nxv4i8.p0(ptr align 1 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP5]])
; CHECK-NEXT: [[TMP13:%.*]] = add <vscale x 4 x i8> [[TMP10]], [[WIDE_MASKED_LOAD1]]
; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds i8, ptr [[TMP11]], i32 0
-; CHECK-NEXT: call void @llvm.masked.store.nxv4i8.p0(<vscale x 4 x i8> [[TMP13]], ptr [[TMP14]], i32 1, <vscale x 4 x i1> [[ACTIVE_LANE_MASK]])
+; CHECK-NEXT: call void @llvm.vp.store.nxv4i8.p0(<vscale x 4 x i8> [[TMP13]], ptr align 1 [[TMP14]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP5]])
; CHECK-NEXT: br label [[MIDDLE_BLOCK:%.*]]
; CHECK: middle.block:
; CHECK-NEXT: br label [[FOR_END:%.*]]
@@ -358,3 +358,64 @@ for.end: ; preds = %for.body
attributes #0 = { "target-features"="+v,+d" vscale_range(2, 1024) }
+; This is a non-power-of-2 low trip count, so we will try to tail-fold this.
+; However, the reduction is a multiply, which is only legal for fixed-length
+; VFs, and fixed-length VFs aren't legal for the default tail-folding style
+; data-with-evl, so make sure we gracefully fall back to data-without-lane-mask.
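+;
+; As a reading aid (an illustrative sketch, not part of the autogenerated
+; checks), the scalar loop below is roughly this C code, assuming `a` points
+; at a buffer of at least 10 bytes:
+;
+;   uint8_t rdx = 2;
+;   for (int i = 0; i < 10; i++)
+;     rdx *= a[i];
+;   return rdx;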
+
+define i8 @mul_non_pow_2_low_trip_count(ptr noalias %a) {
+; CHECK-LABEL: @mul_non_pow_2_low_trip_count(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK: vector.ph:
+; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
+; CHECK: vector.body:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <16 x i8> [ <i8 2, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>, [[VECTOR_PH]] ], [ [[TMP2:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <16 x i64> poison, i64 [[INDEX]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <16 x i64> [[BROADCAST_SPLATINSERT]], <16 x i64> poison, <16 x i32> zeroinitializer
+; CHECK-NEXT: [[VEC_IV:%.*]] = add <16 x i64> [[BROADCAST_SPLAT]], <i64 0, i64 1, i64 2, i64 3, i64 4, i64 5, i64 6, i64 7, i64 8, i64 9, i64 10, i64 11, i64 12, i64 13, i64 14, i64 15>
+; CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = icmp ule <16 x i64> [[VEC_IV]], splat (i64 9)
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr i8, ptr [[A:%.*]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[TMP0]], i32 0
+; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <16 x i8> @llvm.masked.load.v16i8.p0(ptr [[TMP1]], i32 1, <16 x i1> [[ACTIVE_LANE_MASK]], <16 x i8> poison)
+; CHECK-NEXT: [[TMP2]] = mul <16 x i8> [[WIDE_MASKED_LOAD]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP3:%.*]] = select <16 x i1> [[ACTIVE_LANE_MASK]], <16 x i8> [[TMP2]], <16 x i8> [[VEC_PHI]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
+; CHECK-NEXT: br i1 true, label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
+; CHECK: middle.block:
+; CHECK-NEXT: [[TMP4:%.*]] = call i8 @llvm.vector.reduce.mul.v16i8(<16 x i8> [[TMP3]])
+; CHECK-NEXT: br label [[FOR_END:%.*]]
+; CHECK: scalar.ph:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i8 [ 2, [[ENTRY]] ]
+; CHECK-NEXT: br label [[FOR_BODY:%.*]]
+; CHECK: for.body:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT: [[RDX:%.*]] = phi i8 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[MUL:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr i8, ptr [[A]], i64 [[IV]]
+; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr [[GEP]], align 1
+; CHECK-NEXT: [[MUL]] = mul i8 [[TMP5]], [[RDX]]
+; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
+; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 10
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
+; CHECK: for.end:
+; CHECK-NEXT: [[MUL_LCSSA:%.*]] = phi i8 [ [[MUL]], [[FOR_BODY]] ], [ [[TMP4]], [[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: ret i8 [[MUL_LCSSA]]
+;
+entry:
+  br label %for.body
+
+for.body: ; preds = %entry, %for.body
+  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+  %rdx = phi i8 [ 2, %entry ], [ %mul, %for.body ]
+  %gep = getelementptr i8, ptr %a, i64 %iv
+  %0 = load i8, ptr %gep
+  %mul = mul i8 %0, %rdx
+  %iv.next = add i64 %iv, 1
+  %exitcond.not = icmp eq i64 %iv.next, 10
+  br i1 %exitcond.not, label %for.end, label %for.body
+
+for.end: ; preds = %for.body, %entry
+  ret i8 %mul
+}
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/pr88802.ll b/llvm/test/Transforms/LoopVectorize/RISCV/pr88802.ll
index a1201dcf..0228811 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/pr88802.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/pr88802.ll
@@ -7,29 +7,49 @@ define void @test(ptr %p, i64 %a, i8 %b) {
; CHECK-NEXT: entry:
; CHECK-NEXT: br i1 false, label [[SCALAR_PH1:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
-; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <16 x i8> poison, i8 [[B]], i64 0
-; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <16 x i8> [[BROADCAST_SPLATINSERT]], <16 x i8> poison, <16 x i32> zeroinitializer
-; CHECK-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <16 x i64> poison, i64 [[A]], i64 0
-; CHECK-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <16 x i64> [[BROADCAST_SPLATINSERT1]], <16 x i64> poison, <16 x i32> zeroinitializer
-; CHECK-NEXT: [[TMP0:%.*]] = shl <16 x i64> [[BROADCAST_SPLAT2]], splat (i64 48)
-; CHECK-NEXT: [[TMP1:%.*]] = ashr <16 x i64> [[TMP0]], splat (i64 52)
-; CHECK-NEXT: [[TMP2:%.*]] = trunc <16 x i64> [[TMP1]] to <16 x i32>
-; CHECK-NEXT: [[TMP3:%.*]] = zext <16 x i8> [[BROADCAST_SPLAT]] to <16 x i32>
+; CHECK-NEXT: [[TMP0:%.*]] = call i32 @llvm.vscale.i32()
+; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i32 [[TMP0]], 2
+; CHECK-NEXT: [[TMP2:%.*]] = sub i32 [[TMP1]], 1
+; CHECK-NEXT: [[N_RND_UP:%.*]] = add i32 9, [[TMP2]]
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i32 [[N_RND_UP]], [[TMP1]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i32 [[N_RND_UP]], [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.vscale.i32()
+; CHECK-NEXT: [[TMP4:%.*]] = mul nuw i32 [[TMP3]], 2
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i8> poison, i8 [[B]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i8> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[A]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <vscale x 2 x i64> [[BROADCAST_SPLATINSERT1]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP5:%.*]] = shl <vscale x 2 x i64> [[BROADCAST_SPLAT2]], splat (i64 48)
+; CHECK-NEXT: [[TMP6:%.*]] = ashr <vscale x 2 x i64> [[TMP5]], splat (i64 52)
+; CHECK-NEXT: [[TMP7:%.*]] = trunc <vscale x 2 x i64> [[TMP6]] to <vscale x 2 x i32>
+; CHECK-NEXT: [[TMP8:%.*]] = zext <vscale x 2 x i8> [[BROADCAST_SPLAT]] to <vscale x 2 x i32>
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT3:%.*]] = insertelement <vscale x 2 x ptr> poison, ptr [[P]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT4:%.*]] = shufflevector <vscale x 2 x ptr> [[BROADCAST_SPLATINSERT3]], <vscale x 2 x ptr> poison, <vscale x 2 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP9:%.*]] = call <vscale x 2 x i32> @llvm.stepvector.nxv2i32()
+; CHECK-NEXT: [[TMP10:%.*]] = mul <vscale x 2 x i32> [[TMP9]], splat (i32 1)
+; CHECK-NEXT: [[INDUCTION:%.*]] = add <vscale x 2 x i32> zeroinitializer, [[TMP10]]
; CHECK-NEXT: br label [[FOR_COND:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[FOR_COND]] ]
-; CHECK-NEXT: [[VEC_IND:%.*]] = phi <16 x i32> [ <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[FOR_COND]] ]
-; CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = call <16 x i1> @llvm.get.active.lane.mask.v16i1.i32(i32 [[INDEX]], i32 9)
-; CHECK-NEXT: [[TMP4:%.*]] = icmp sge <16 x i32> [[VEC_IND]], splat (i32 2)
-; CHECK-NEXT: [[TMP5:%.*]] = select <16 x i1> [[ACTIVE_LANE_MASK]], <16 x i1> [[TMP4]], <16 x i1> zeroinitializer
-; CHECK-NEXT: [[PREDPHI:%.*]] = select <16 x i1> [[TMP5]], <16 x i32> [[TMP2]], <16 x i32> [[TMP3]]
-; CHECK-NEXT: [[TMP6:%.*]] = shl <16 x i32> [[PREDPHI]], splat (i32 8)
-; CHECK-NEXT: [[TMP8:%.*]] = trunc <16 x i32> [[TMP6]] to <16 x i8>
-; CHECK-NEXT: [[TMP40:%.*]] = extractelement <16 x i8> [[TMP8]], i32 15
-; CHECK-NEXT: store i8 [[TMP40]], ptr [[P]], align 1
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 16
-; CHECK-NEXT: [[VEC_IND_NEXT]] = add <16 x i32> [[VEC_IND]], splat (i32 16)
-; CHECK-NEXT: br i1 true, label [[MIDDLE_BLOCK:%.*]], label [[FOR_COND]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK-NEXT: [[EVL_BASED_IV:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[FOR_COND]] ]
+; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 2 x i32> [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[FOR_COND]] ]
+; CHECK-NEXT: [[AVL:%.*]] = sub i32 9, [[EVL_BASED_IV]]
+; CHECK-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i32(i32 [[AVL]], i32 2, i1 true)
+; CHECK-NEXT: [[TMP12:%.*]] = mul i32 1, [[TMP11]]
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT5:%.*]] = insertelement <vscale x 2 x i32> poison, i32 [[TMP12]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT6:%.*]] = shufflevector <vscale x 2 x i32> [[BROADCAST_SPLATINSERT5]], <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP13:%.*]] = icmp ule <vscale x 2 x i32> [[VEC_IND]], splat (i32 8)
+; CHECK-NEXT: [[TMP14:%.*]] = icmp sge <vscale x 2 x i32> [[VEC_IND]], splat (i32 2)
+; CHECK-NEXT: [[TMP15:%.*]] = select <vscale x 2 x i1> [[TMP13]], <vscale x 2 x i1> [[TMP14]], <vscale x 2 x i1> zeroinitializer
+; CHECK-NEXT: [[PREDPHI:%.*]] = select <vscale x 2 x i1> [[TMP15]], <vscale x 2 x i32> [[TMP7]], <vscale x 2 x i32> [[TMP8]]
+; CHECK-NEXT: [[TMP16:%.*]] = shl <vscale x 2 x i32> [[PREDPHI]], splat (i32 8)
+; CHECK-NEXT: [[TMP17:%.*]] = trunc <vscale x 2 x i32> [[TMP16]] to <vscale x 2 x i8>
+; CHECK-NEXT: call void @llvm.vp.scatter.nxv2i8.nxv2p0(<vscale x 2 x i8> [[TMP17]], <vscale x 2 x ptr> align 1 [[BROADCAST_SPLAT4]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP11]])
+; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add nuw i32 [[TMP11]], [[EVL_BASED_IV]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP4]]
+; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 2 x i32> [[VEC_IND]], [[BROADCAST_SPLAT6]]
+; CHECK-NEXT: [[TMP18:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_COND]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br label [[EXIT1:%.*]]
; CHECK: scalar.ph:
@@ -52,7 +72,7 @@ define void @test(ptr %p, i64 %a, i8 %b) {
; CHECK-NEXT: [[TRUNC:%.*]] = trunc i32 [[SHL_I32]] to i8
; CHECK-NEXT: store i8 [[TRUNC]], ptr [[P]], align 1
; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[IV]], 8
-; CHECK-NEXT: br i1 [[CMP]], label [[FOR_COND1]], label [[EXIT1]], !llvm.loop [[LOOP3:![0-9]+]]
+; CHECK-NEXT: br i1 [[CMP]], label [[FOR_COND1]], label [[EXIT1]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
@@ -84,8 +104,9 @@ exit: ; preds = %for.body
ret void
}
;.
-; CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]} +; CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]], [[META3:![0-9]+]]} ; CHECK: [[META1]] = !{!"llvm.loop.isvectorized", i32 1} -; CHECK: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"} -; CHECK: [[LOOP3]] = distinct !{[[LOOP3]], [[META2]], [[META1]]} +; CHECK: [[META2]] = !{!"llvm.loop.isvectorized.tailfoldingstyle", !"evl"} +; CHECK: [[META3]] = !{!"llvm.loop.unroll.runtime.disable"} +; CHECK: [[LOOP4]] = distinct !{[[LOOP4]], [[META3]], [[META1]]} ;. diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/riscv-vector-reverse-output.ll b/llvm/test/Transforms/LoopVectorize/RISCV/riscv-vector-reverse-output.ll deleted file mode 100644 index 4844c2f..0000000 --- a/llvm/test/Transforms/LoopVectorize/RISCV/riscv-vector-reverse-output.ll +++ /dev/null @@ -1,690 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals none --version 5 -;; This is the loop in c++ being vectorize in this file with -;; vector.reverse -;; #pragma clang loop vectorize_width(4, scalable) -;; for (int i = N-1; i >= 0; --i) -;; a[i] = b[i] + 1.0; - -; RUN: opt -passes=loop-vectorize -mtriple=riscv64 -mattr=+v -S < %s \ -; RUN: | FileCheck --check-prefix=RV64 %s - -; RUN: opt -passes=loop-vectorize -mtriple=riscv32 -mattr=+v -S < %s \ -; RUN: | FileCheck --check-prefix=RV32 %s - -; RUN: opt -passes=loop-vectorize -mtriple=riscv64 -mattr=+v -force-vector-interleave=2 -S < %s \ -; RUN: | FileCheck --check-prefix=RV64-UF2 %s - -define void @vector_reverse_i32(ptr noalias %A, ptr noalias %B) { -; RV64-LABEL: define void @vector_reverse_i32( -; RV64-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]]) #[[ATTR0:[0-9]+]] { -; RV64-NEXT: [[ENTRY:.*]]: -; RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() -; RV64-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4 -; RV64-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1023, [[TMP1]] -; RV64-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] -; RV64: [[VECTOR_PH]]: -; RV64-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() -; RV64-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4 -; RV64-NEXT: [[N_MOD_VF:%.*]] = urem i64 1023, [[TMP3]] -; RV64-NEXT: [[N_VEC:%.*]] = sub i64 1023, [[N_MOD_VF]] -; RV64-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; RV64-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4 -; RV64-NEXT: [[TMP6:%.*]] = sub i64 1023, [[N_VEC]] -; RV64-NEXT: br label %[[VECTOR_BODY:.*]] -; RV64: [[VECTOR_BODY]]: -; RV64-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] -; RV64-NEXT: [[OFFSET_IDX:%.*]] = sub i64 1023, [[INDEX]] -; RV64-NEXT: [[TMP8:%.*]] = add nsw i64 [[OFFSET_IDX]], -1 -; RV64-NEXT: [[TMP9:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[TMP8]] -; RV64-NEXT: [[TMP10:%.*]] = mul i64 0, [[TMP5]] -; RV64-NEXT: [[TMP22:%.*]] = sub i64 [[TMP5]], 1 -; RV64-NEXT: [[TMP11:%.*]] = mul i64 -1, [[TMP22]] -; RV64-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[TMP9]], i64 [[TMP10]] -; RV64-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, ptr [[TMP12]], i64 [[TMP11]] -; RV64-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP13]], align 4 -; RV64-NEXT: [[REVERSE:%.*]] = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[WIDE_LOAD]]) -; RV64-NEXT: [[TMP14:%.*]] = add <vscale x 4 x i32> [[REVERSE]], splat (i32 1) -; RV64-NEXT: [[TMP15:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP8]] -; 
RV64-NEXT: [[TMP16:%.*]] = mul i64 0, [[TMP5]]
-; RV64-NEXT: [[TMP23:%.*]] = sub i64 [[TMP5]], 1
-; RV64-NEXT: [[TMP17:%.*]] = mul i64 -1, [[TMP23]]
-; RV64-NEXT: [[TMP18:%.*]] = getelementptr inbounds i32, ptr [[TMP15]], i64 [[TMP16]]
-; RV64-NEXT: [[TMP19:%.*]] = getelementptr inbounds i32, ptr [[TMP18]], i64 [[TMP17]]
-; RV64-NEXT: [[REVERSE1:%.*]] = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[TMP14]])
-; RV64-NEXT: store <vscale x 4 x i32> [[REVERSE1]], ptr [[TMP19]], align 4
-; RV64-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; RV64-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; RV64-NEXT: br i1 [[TMP20]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
-; RV64: [[MIDDLE_BLOCK]]:
-; RV64-NEXT: [[CMP_N:%.*]] = icmp eq i64 1023, [[N_VEC]]
-; RV64-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
-; RV64: [[SCALAR_PH]]:
-; RV64-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP6]], %[[MIDDLE_BLOCK]] ], [ 1023, %[[ENTRY]] ]
-; RV64-NEXT: br label %[[FOR_BODY:.*]]
-; RV64: [[FOR_BODY]]:
-; RV64-NEXT: [[DEC_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
-; RV64-NEXT: [[IV_NEXT]] = add nsw i64 [[DEC_IV]], -1
-; RV64-NEXT: [[ARRAYIDX_B:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV_NEXT]]
-; RV64-NEXT: [[TMP21:%.*]] = load i32, ptr [[ARRAYIDX_B]], align 4
-; RV64-NEXT: [[ADD:%.*]] = add i32 [[TMP21]], 1
-; RV64-NEXT: [[ARRAYIDX_A:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV_NEXT]]
-; RV64-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX_A]], align 4
-; RV64-NEXT: [[CMP:%.*]] = icmp ugt i64 [[DEC_IV]], 1
-; RV64-NEXT: br i1 [[CMP]], label %[[FOR_BODY]], label %[[EXIT]], !llvm.loop [[LOOP3:![0-9]+]]
-; RV64: [[EXIT]]:
-; RV64-NEXT: ret void
-;
-; RV32-LABEL: define void @vector_reverse_i32(
-; RV32-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]]) #[[ATTR0:[0-9]+]] {
-; RV32-NEXT: [[ENTRY:.*]]:
-; RV32-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; RV32-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; RV32-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1023, [[TMP1]]
-; RV32-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
-; RV32: [[VECTOR_PH]]:
-; RV32-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; RV32-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
-; RV32-NEXT: [[N_MOD_VF:%.*]] = urem i64 1023, [[TMP3]]
-; RV32-NEXT: [[N_VEC:%.*]] = sub i64 1023, [[N_MOD_VF]]
-; RV32-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; RV32-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
-; RV32-NEXT: [[TMP6:%.*]] = sub i64 1023, [[N_VEC]]
-; RV32-NEXT: br label %[[VECTOR_BODY:.*]]
-; RV32: [[VECTOR_BODY]]:
-; RV32-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; RV32-NEXT: [[OFFSET_IDX:%.*]] = sub i64 1023, [[INDEX]]
-; RV32-NEXT: [[TMP8:%.*]] = add nsw i64 [[OFFSET_IDX]], -1
-; RV32-NEXT: [[TMP9:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[TMP8]]
-; RV32-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP5]] to i32
-; RV32-NEXT: [[TMP11:%.*]] = mul i32 0, [[TMP10]]
-; RV32-NEXT: [[TMP24:%.*]] = sub i32 [[TMP10]], 1
-; RV32-NEXT: [[TMP12:%.*]] = mul i32 -1, [[TMP24]]
-; RV32-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, ptr [[TMP9]], i32 [[TMP11]]
-; RV32-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, ptr [[TMP13]], i32 [[TMP12]]
-; RV32-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP14]], align 4
-; RV32-NEXT: [[REVERSE:%.*]] = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[WIDE_LOAD]])
-; RV32-NEXT: [[TMP15:%.*]] = add <vscale x 4 x i32> [[REVERSE]], splat (i32 1)
-; RV32-NEXT: [[TMP16:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP8]]
-; RV32-NEXT: [[TMP17:%.*]] = trunc i64 [[TMP5]] to i32
-; RV32-NEXT: [[TMP18:%.*]] = mul i32 0, [[TMP17]]
-; RV32-NEXT: [[TMP25:%.*]] = sub i32 [[TMP17]], 1
-; RV32-NEXT: [[TMP19:%.*]] = mul i32 -1, [[TMP25]]
-; RV32-NEXT: [[TMP20:%.*]] = getelementptr inbounds i32, ptr [[TMP16]], i32 [[TMP18]]
-; RV32-NEXT: [[TMP21:%.*]] = getelementptr inbounds i32, ptr [[TMP20]], i32 [[TMP19]]
-; RV32-NEXT: [[REVERSE1:%.*]] = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[TMP15]])
-; RV32-NEXT: store <vscale x 4 x i32> [[REVERSE1]], ptr [[TMP21]], align 4
-; RV32-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; RV32-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; RV32-NEXT: br i1 [[TMP22]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
-; RV32: [[MIDDLE_BLOCK]]:
-; RV32-NEXT: [[CMP_N:%.*]] = icmp eq i64 1023, [[N_VEC]]
-; RV32-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
-; RV32: [[SCALAR_PH]]:
-; RV32-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP6]], %[[MIDDLE_BLOCK]] ], [ 1023, %[[ENTRY]] ]
-; RV32-NEXT: br label %[[FOR_BODY:.*]]
-; RV32: [[FOR_BODY]]:
-; RV32-NEXT: [[DEC_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
-; RV32-NEXT: [[IV_NEXT]] = add nsw i64 [[DEC_IV]], -1
-; RV32-NEXT: [[ARRAYIDX_B:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV_NEXT]]
-; RV32-NEXT: [[TMP23:%.*]] = load i32, ptr [[ARRAYIDX_B]], align 4
-; RV32-NEXT: [[ADD:%.*]] = add i32 [[TMP23]], 1
-; RV32-NEXT: [[ARRAYIDX_A:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV_NEXT]]
-; RV32-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX_A]], align 4
-; RV32-NEXT: [[CMP:%.*]] = icmp ugt i64 [[DEC_IV]], 1
-; RV32-NEXT: br i1 [[CMP]], label %[[FOR_BODY]], label %[[EXIT]], !llvm.loop [[LOOP3:![0-9]+]]
-; RV32: [[EXIT]]:
-; RV32-NEXT: ret void
-;
-; RV64-UF2-LABEL: define void @vector_reverse_i32(
-; RV64-UF2-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]]) #[[ATTR0:[0-9]+]] {
-; RV64-UF2-NEXT: [[ENTRY:.*]]:
-; RV64-UF2-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; RV64-UF2-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 8
-; RV64-UF2-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1023, [[TMP1]]
-; RV64-UF2-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
-; RV64-UF2: [[VECTOR_PH]]:
-; RV64-UF2-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; RV64-UF2-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8
-; RV64-UF2-NEXT: [[N_MOD_VF:%.*]] = urem i64 1023, [[TMP3]]
-; RV64-UF2-NEXT: [[N_VEC:%.*]] = sub i64 1023, [[N_MOD_VF]]
-; RV64-UF2-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; RV64-UF2-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
-; RV64-UF2-NEXT: [[TMP6:%.*]] = mul i64 [[TMP5]], 2
-; RV64-UF2-NEXT: [[TMP7:%.*]] = sub i64 1023, [[N_VEC]]
-; RV64-UF2-NEXT: br label %[[VECTOR_BODY:.*]]
-; RV64-UF2: [[VECTOR_BODY]]:
-; RV64-UF2-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; RV64-UF2-NEXT: [[OFFSET_IDX:%.*]] = sub i64 1023, [[INDEX]]
-; RV64-UF2-NEXT: [[TMP9:%.*]] = add nsw i64 [[OFFSET_IDX]], -1
-; RV64-UF2-NEXT: [[TMP10:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[TMP9]]
-; RV64-UF2-NEXT: [[TMP11:%.*]] = mul i64 0, [[TMP5]]
-; RV64-UF2-NEXT: [[TMP32:%.*]] = sub i64 [[TMP5]], 1
-; RV64-UF2-NEXT: [[TMP12:%.*]] = mul i64 -1, [[TMP32]]
-; RV64-UF2-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, ptr [[TMP10]], i64 [[TMP11]]
-; RV64-UF2-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, ptr [[TMP13]], i64 [[TMP12]]
-; RV64-UF2-NEXT: [[TMP15:%.*]] = mul i64 -1, [[TMP5]]
-; RV64-UF2-NEXT: [[TMP33:%.*]] = sub i64 [[TMP5]], 1
-; RV64-UF2-NEXT: [[TMP16:%.*]] = mul i64 -1, [[TMP33]]
-; RV64-UF2-NEXT: [[TMP17:%.*]] = getelementptr inbounds i32, ptr [[TMP10]], i64 [[TMP15]]
-; RV64-UF2-NEXT: [[TMP18:%.*]] = getelementptr inbounds i32, ptr [[TMP17]], i64 [[TMP16]]
-; RV64-UF2-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP14]], align 4
-; RV64-UF2-NEXT: [[REVERSE:%.*]] = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[WIDE_LOAD]])
-; RV64-UF2-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 4 x i32>, ptr [[TMP18]], align 4
-; RV64-UF2-NEXT: [[REVERSE2:%.*]] = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[WIDE_LOAD1]])
-; RV64-UF2-NEXT: [[TMP19:%.*]] = add <vscale x 4 x i32> [[REVERSE]], splat (i32 1)
-; RV64-UF2-NEXT: [[TMP20:%.*]] = add <vscale x 4 x i32> [[REVERSE2]], splat (i32 1)
-; RV64-UF2-NEXT: [[TMP21:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP9]]
-; RV64-UF2-NEXT: [[TMP22:%.*]] = mul i64 0, [[TMP5]]
-; RV64-UF2-NEXT: [[TMP34:%.*]] = sub i64 [[TMP5]], 1
-; RV64-UF2-NEXT: [[TMP23:%.*]] = mul i64 -1, [[TMP34]]
-; RV64-UF2-NEXT: [[TMP24:%.*]] = getelementptr inbounds i32, ptr [[TMP21]], i64 [[TMP22]]
-; RV64-UF2-NEXT: [[TMP25:%.*]] = getelementptr inbounds i32, ptr [[TMP24]], i64 [[TMP23]]
-; RV64-UF2-NEXT: [[TMP26:%.*]] = mul i64 -1, [[TMP5]]
-; RV64-UF2-NEXT: [[TMP35:%.*]] = sub i64 [[TMP5]], 1
-; RV64-UF2-NEXT: [[TMP27:%.*]] = mul i64 -1, [[TMP35]]
-; RV64-UF2-NEXT: [[TMP28:%.*]] = getelementptr inbounds i32, ptr [[TMP21]], i64 [[TMP26]]
-; RV64-UF2-NEXT: [[TMP29:%.*]] = getelementptr inbounds i32, ptr [[TMP28]], i64 [[TMP27]]
-; RV64-UF2-NEXT: [[REVERSE3:%.*]] = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[TMP19]])
-; RV64-UF2-NEXT: store <vscale x 4 x i32> [[REVERSE3]], ptr [[TMP25]], align 4
-; RV64-UF2-NEXT: [[REVERSE4:%.*]] = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[TMP20]])
-; RV64-UF2-NEXT: store <vscale x 4 x i32> [[REVERSE4]], ptr [[TMP29]], align 4
-; RV64-UF2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]]
-; RV64-UF2-NEXT: [[TMP30:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; RV64-UF2-NEXT: br i1 [[TMP30]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
-; RV64-UF2: [[MIDDLE_BLOCK]]:
-; RV64-UF2-NEXT: [[CMP_N:%.*]] = icmp eq i64 1023, [[N_VEC]]
-; RV64-UF2-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
-; RV64-UF2: [[SCALAR_PH]]:
-; RV64-UF2-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP7]], %[[MIDDLE_BLOCK]] ], [ 1023, %[[ENTRY]] ]
-; RV64-UF2-NEXT: br label %[[FOR_BODY:.*]]
-; RV64-UF2: [[FOR_BODY]]:
-; RV64-UF2-NEXT: [[DEC_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
-; RV64-UF2-NEXT: [[IV_NEXT]] = add nsw i64 [[DEC_IV]], -1
-; RV64-UF2-NEXT: [[ARRAYIDX_B:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV_NEXT]]
-; RV64-UF2-NEXT: [[TMP31:%.*]] = load i32, ptr [[ARRAYIDX_B]], align 4
-; RV64-UF2-NEXT: [[ADD:%.*]] = add i32 [[TMP31]], 1
-; RV64-UF2-NEXT: [[ARRAYIDX_A:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV_NEXT]]
-; RV64-UF2-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX_A]], align 4
-; RV64-UF2-NEXT: [[CMP:%.*]] = icmp ugt i64 [[DEC_IV]], 1
-; RV64-UF2-NEXT: br i1 [[CMP]], label %[[FOR_BODY]], label %[[EXIT]], !llvm.loop [[LOOP3:![0-9]+]]
-; RV64-UF2: [[EXIT]]:
-; RV64-UF2-NEXT: ret void
-;
-entry:
- br label %for.body
-
-for.body:
- %dec.iv = phi i64 [ 1023, %entry ], [ %iv.next, %for.body ]
- %iv.next = add nsw i64 %dec.iv, -1
- %arrayidx.b = getelementptr inbounds i32, ptr %B, i64 %iv.next
- %0 = load i32, ptr %arrayidx.b, align 4
- %add = add i32 %0, 1
- %arrayidx.a = getelementptr inbounds i32, ptr %A, i64 %iv.next
- store i32 %add, ptr %arrayidx.a, align 4
- %cmp = icmp ugt i64 %dec.iv, 1
- br i1 %cmp, label %for.body, label %exit, !llvm.loop !0
-
-exit:
- ret void
-}
-
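All of the removed check blocks above encode one addressing pattern for a reverse (descending-index) access: the scalar GEP points at the last element the vector iteration touches, so the contiguous load must start VF - 1 elements lower, and the loaded lanes are then put back into loop order with llvm.vector.reverse. A minimal standalone sketch of that pattern, assuming VF = vscale x 4; the function @reverse_load_sketch and its parameters are illustrative, not part of the test:

; Sketch with assumed names: load the VF i32s ending at %last, in loop order.
define <vscale x 4 x i32> @reverse_load_sketch(ptr %last, i64 %vf) {
  %vfm1 = sub i64 %vf, 1                                   ; VF - 1
  %off = mul i64 -1, %vfm1                                 ; -(VF - 1)
  %start = getelementptr inbounds i32, ptr %last, i64 %off
  %wide = load <vscale x 4 x i32>, ptr %start, align 4     ; lanes in memory order
  %rev = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> %wide)
  ret <vscale x 4 x i32> %rev                              ; lanes in loop order
}
declare <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32>)

Here %vf stands for the runtime vscale * 4 that the checks compute into [[TMP5]].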
-define void @vector_reverse_f32(ptr noalias %A, ptr noalias %B) {
-; RV64-LABEL: define void @vector_reverse_f32(
-; RV64-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]]) #[[ATTR0]] {
-; RV64-NEXT: [[ENTRY:.*]]:
-; RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; RV64-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; RV64-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1023, [[TMP1]]
-; RV64-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
-; RV64: [[VECTOR_PH]]:
-; RV64-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; RV64-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
-; RV64-NEXT: [[N_MOD_VF:%.*]] = urem i64 1023, [[TMP3]]
-; RV64-NEXT: [[N_VEC:%.*]] = sub i64 1023, [[N_MOD_VF]]
-; RV64-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; RV64-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
-; RV64-NEXT: [[TMP6:%.*]] = sub i64 1023, [[N_VEC]]
-; RV64-NEXT: br label %[[VECTOR_BODY:.*]]
-; RV64: [[VECTOR_BODY]]:
-; RV64-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; RV64-NEXT: [[OFFSET_IDX:%.*]] = sub i64 1023, [[INDEX]]
-; RV64-NEXT: [[TMP8:%.*]] = add nsw i64 [[OFFSET_IDX]], -1
-; RV64-NEXT: [[TMP9:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[TMP8]]
-; RV64-NEXT: [[TMP10:%.*]] = mul i64 0, [[TMP5]]
-; RV64-NEXT: [[TMP22:%.*]] = sub i64 [[TMP5]], 1
-; RV64-NEXT: [[TMP11:%.*]] = mul i64 -1, [[TMP22]]
-; RV64-NEXT: [[TMP12:%.*]] = getelementptr inbounds float, ptr [[TMP9]], i64 [[TMP10]]
-; RV64-NEXT: [[TMP13:%.*]] = getelementptr inbounds float, ptr [[TMP12]], i64 [[TMP11]]
-; RV64-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[TMP13]], align 4
-; RV64-NEXT: [[REVERSE:%.*]] = call <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float> [[WIDE_LOAD]])
-; RV64-NEXT: [[TMP14:%.*]] = fadd <vscale x 4 x float> [[REVERSE]], splat (float 1.000000e+00)
-; RV64-NEXT: [[TMP15:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[TMP8]]
-; RV64-NEXT: [[TMP16:%.*]] = mul i64 0, [[TMP5]]
-; RV64-NEXT: [[TMP23:%.*]] = sub i64 [[TMP5]], 1
-; RV64-NEXT: [[TMP17:%.*]] = mul i64 -1, [[TMP23]]
-; RV64-NEXT: [[TMP18:%.*]] = getelementptr inbounds float, ptr [[TMP15]], i64 [[TMP16]]
-; RV64-NEXT: [[TMP19:%.*]] = getelementptr inbounds float, ptr [[TMP18]], i64 [[TMP17]]
-; RV64-NEXT: [[REVERSE1:%.*]] = call <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float> [[TMP14]])
-; RV64-NEXT: store <vscale x 4 x float> [[REVERSE1]], ptr [[TMP19]], align 4
-; RV64-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; RV64-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; RV64-NEXT: br i1 [[TMP20]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
-; RV64: [[MIDDLE_BLOCK]]:
-; RV64-NEXT: [[CMP_N:%.*]] = icmp eq i64 1023, [[N_VEC]]
-; RV64-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
-; RV64: [[SCALAR_PH]]:
-; RV64-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP6]], %[[MIDDLE_BLOCK]] ], [ 1023, %[[ENTRY]] ]
-; RV64-NEXT: br label %[[FOR_BODY:.*]]
-; RV64: [[FOR_BODY]]:
-; RV64-NEXT: [[DEC_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
-; RV64-NEXT: [[IV_NEXT]] = add nsw i64 [[DEC_IV]], -1
-; RV64-NEXT: [[ARRAYIDX_B:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV_NEXT]]
-; RV64-NEXT: [[TMP21:%.*]] = load float, ptr [[ARRAYIDX_B]], align 4
-; RV64-NEXT: [[FADD:%.*]] = fadd float [[TMP21]], 1.000000e+00
-; RV64-NEXT: [[ARRAYIDX_A:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV_NEXT]]
-; RV64-NEXT: store float [[FADD]], ptr [[ARRAYIDX_A]], align 4
-; RV64-NEXT: [[CMP:%.*]] = icmp ugt i64 [[DEC_IV]], 1
-; RV64-NEXT: br i1 [[CMP]], label %[[FOR_BODY]], label %[[EXIT]], !llvm.loop [[LOOP5:![0-9]+]]
-; RV64: [[EXIT]]:
-; RV64-NEXT: ret void
-;
-; RV32-LABEL: define void @vector_reverse_f32(
-; RV32-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]]) #[[ATTR0]] {
-; RV32-NEXT: [[ENTRY:.*]]:
-; RV32-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; RV32-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; RV32-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1023, [[TMP1]]
-; RV32-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
-; RV32: [[VECTOR_PH]]:
-; RV32-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; RV32-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
-; RV32-NEXT: [[N_MOD_VF:%.*]] = urem i64 1023, [[TMP3]]
-; RV32-NEXT: [[N_VEC:%.*]] = sub i64 1023, [[N_MOD_VF]]
-; RV32-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; RV32-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
-; RV32-NEXT: [[TMP6:%.*]] = sub i64 1023, [[N_VEC]]
-; RV32-NEXT: br label %[[VECTOR_BODY:.*]]
-; RV32: [[VECTOR_BODY]]:
-; RV32-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; RV32-NEXT: [[OFFSET_IDX:%.*]] = sub i64 1023, [[INDEX]]
-; RV32-NEXT: [[TMP8:%.*]] = add nsw i64 [[OFFSET_IDX]], -1
-; RV32-NEXT: [[TMP9:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[TMP8]]
-; RV32-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP5]] to i32
-; RV32-NEXT: [[TMP11:%.*]] = mul i32 0, [[TMP10]]
-; RV32-NEXT: [[TMP24:%.*]] = sub i32 [[TMP10]], 1
-; RV32-NEXT: [[TMP12:%.*]] = mul i32 -1, [[TMP24]]
-; RV32-NEXT: [[TMP13:%.*]] = getelementptr inbounds float, ptr [[TMP9]], i32 [[TMP11]]
-; RV32-NEXT: [[TMP14:%.*]] = getelementptr inbounds float, ptr [[TMP13]], i32 [[TMP12]]
-; RV32-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[TMP14]], align 4
-; RV32-NEXT: [[REVERSE:%.*]] = call <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float> [[WIDE_LOAD]])
-; RV32-NEXT: [[TMP15:%.*]] = fadd <vscale x 4 x float> [[REVERSE]], splat (float 1.000000e+00)
-; RV32-NEXT: [[TMP16:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[TMP8]]
-; RV32-NEXT: [[TMP17:%.*]] = trunc i64 [[TMP5]] to i32
-; RV32-NEXT: [[TMP18:%.*]] = mul i32 0, [[TMP17]]
-; RV32-NEXT: [[TMP25:%.*]] = sub i32 [[TMP17]], 1
-; RV32-NEXT: [[TMP19:%.*]] = mul i32 -1, [[TMP25]]
-; RV32-NEXT: [[TMP20:%.*]] = getelementptr inbounds float, ptr [[TMP16]], i32 [[TMP18]]
-; RV32-NEXT: [[TMP21:%.*]] = getelementptr inbounds float, ptr [[TMP20]], i32 [[TMP19]]
-; RV32-NEXT: [[REVERSE1:%.*]] = call <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float> [[TMP15]])
-; RV32-NEXT: store <vscale x 4 x float> [[REVERSE1]], ptr [[TMP21]], align 4
-; RV32-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; RV32-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; RV32-NEXT: br i1 [[TMP22]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
-; RV32: [[MIDDLE_BLOCK]]:
-; RV32-NEXT: [[CMP_N:%.*]] = icmp eq i64 1023, [[N_VEC]]
-; RV32-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
-; RV32: [[SCALAR_PH]]:
-; RV32-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP6]], %[[MIDDLE_BLOCK]] ], [ 1023, %[[ENTRY]] ]
-; RV32-NEXT: br label %[[FOR_BODY:.*]]
-; RV32: [[FOR_BODY]]:
-; RV32-NEXT: [[DEC_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
-; RV32-NEXT: [[IV_NEXT]] = add nsw i64 [[DEC_IV]], -1
-; RV32-NEXT: [[ARRAYIDX_B:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV_NEXT]]
-; RV32-NEXT: [[TMP23:%.*]] = load float, ptr [[ARRAYIDX_B]], align 4
-; RV32-NEXT: [[FADD:%.*]] = fadd float [[TMP23]], 1.000000e+00
-; RV32-NEXT: [[ARRAYIDX_A:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV_NEXT]]
-; RV32-NEXT: store float [[FADD]], ptr [[ARRAYIDX_A]], align 4
-; RV32-NEXT: [[CMP:%.*]] = icmp ugt i64 [[DEC_IV]], 1
-; RV32-NEXT: br i1 [[CMP]], label %[[FOR_BODY]], label %[[EXIT]], !llvm.loop [[LOOP5:![0-9]+]]
-; RV32: [[EXIT]]:
-; RV32-NEXT: ret void
-;
-; RV64-UF2-LABEL: define void @vector_reverse_f32(
-; RV64-UF2-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]]) #[[ATTR0]] {
-; RV64-UF2-NEXT: [[ENTRY:.*]]:
-; RV64-UF2-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; RV64-UF2-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 8
-; RV64-UF2-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1023, [[TMP1]]
-; RV64-UF2-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
-; RV64-UF2: [[VECTOR_PH]]:
-; RV64-UF2-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; RV64-UF2-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8
-; RV64-UF2-NEXT: [[N_MOD_VF:%.*]] = urem i64 1023, [[TMP3]]
-; RV64-UF2-NEXT: [[N_VEC:%.*]] = sub i64 1023, [[N_MOD_VF]]
-; RV64-UF2-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; RV64-UF2-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
-; RV64-UF2-NEXT: [[TMP6:%.*]] = mul i64 [[TMP5]], 2
-; RV64-UF2-NEXT: [[TMP7:%.*]] = sub i64 1023, [[N_VEC]]
-; RV64-UF2-NEXT: br label %[[VECTOR_BODY:.*]]
-; RV64-UF2: [[VECTOR_BODY]]:
-; RV64-UF2-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; RV64-UF2-NEXT: [[OFFSET_IDX:%.*]] = sub i64 1023, [[INDEX]]
-; RV64-UF2-NEXT: [[TMP9:%.*]] = add nsw i64 [[OFFSET_IDX]], -1
-; RV64-UF2-NEXT: [[TMP10:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[TMP9]]
-; RV64-UF2-NEXT: [[TMP11:%.*]] = mul i64 0, [[TMP5]]
-; RV64-UF2-NEXT: [[TMP32:%.*]] = sub i64 [[TMP5]], 1
-; RV64-UF2-NEXT: [[TMP12:%.*]] = mul i64 -1, [[TMP32]]
-; RV64-UF2-NEXT: [[TMP13:%.*]] = getelementptr inbounds float, ptr [[TMP10]], i64 [[TMP11]]
-; RV64-UF2-NEXT: [[TMP14:%.*]] = getelementptr inbounds float, ptr [[TMP13]], i64 [[TMP12]]
-; RV64-UF2-NEXT: [[TMP15:%.*]] = mul i64 -1, [[TMP5]]
-; RV64-UF2-NEXT: [[TMP33:%.*]] = sub i64 [[TMP5]], 1
-; RV64-UF2-NEXT: [[TMP16:%.*]] = mul i64 -1, [[TMP33]]
-; RV64-UF2-NEXT: [[TMP17:%.*]] = getelementptr inbounds float, ptr [[TMP10]], i64 [[TMP15]]
-; RV64-UF2-NEXT: [[TMP18:%.*]] = getelementptr inbounds float, ptr [[TMP17]], i64 [[TMP16]]
-; RV64-UF2-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[TMP14]], align 4
-; RV64-UF2-NEXT: [[REVERSE:%.*]] = call <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float> [[WIDE_LOAD]])
-; RV64-UF2-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 4 x float>, ptr [[TMP18]], align 4
-; RV64-UF2-NEXT: [[REVERSE2:%.*]] = call <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float> [[WIDE_LOAD1]])
-; RV64-UF2-NEXT: [[TMP19:%.*]] = fadd <vscale x 4 x float> [[REVERSE]], splat (float 1.000000e+00)
-; RV64-UF2-NEXT: [[TMP20:%.*]] = fadd <vscale x 4 x float> [[REVERSE2]], splat (float 1.000000e+00)
-; RV64-UF2-NEXT: [[TMP21:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[TMP9]]
-; RV64-UF2-NEXT: [[TMP22:%.*]] = mul i64 0, [[TMP5]]
-; RV64-UF2-NEXT: [[TMP34:%.*]] = sub i64 [[TMP5]], 1
-; RV64-UF2-NEXT: [[TMP23:%.*]] = mul i64 -1, [[TMP34]]
-; RV64-UF2-NEXT: [[TMP24:%.*]] = getelementptr inbounds float, ptr [[TMP21]], i64 [[TMP22]]
-; RV64-UF2-NEXT: [[TMP25:%.*]] = getelementptr inbounds float, ptr [[TMP24]], i64 [[TMP23]]
-; RV64-UF2-NEXT: [[TMP26:%.*]] = mul i64 -1, [[TMP5]]
-; RV64-UF2-NEXT: [[TMP35:%.*]] = sub i64 [[TMP5]], 1
-; RV64-UF2-NEXT: [[TMP27:%.*]] = mul i64 -1, [[TMP35]]
-; RV64-UF2-NEXT: [[TMP28:%.*]] = getelementptr inbounds float, ptr [[TMP21]], i64 [[TMP26]]
-; RV64-UF2-NEXT: [[TMP29:%.*]] = getelementptr inbounds float, ptr [[TMP28]], i64 [[TMP27]]
-; RV64-UF2-NEXT: [[REVERSE3:%.*]] = call <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float> [[TMP19]])
-; RV64-UF2-NEXT: store <vscale x 4 x float> [[REVERSE3]], ptr [[TMP25]], align 4
-; RV64-UF2-NEXT: [[REVERSE4:%.*]] = call <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float> [[TMP20]])
-; RV64-UF2-NEXT: store <vscale x 4 x float> [[REVERSE4]], ptr [[TMP29]], align 4
-; RV64-UF2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]]
-; RV64-UF2-NEXT: [[TMP30:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; RV64-UF2-NEXT: br i1 [[TMP30]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
-; RV64-UF2: [[MIDDLE_BLOCK]]:
-; RV64-UF2-NEXT: [[CMP_N:%.*]] = icmp eq i64 1023, [[N_VEC]]
-; RV64-UF2-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
-; RV64-UF2: [[SCALAR_PH]]:
-; RV64-UF2-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP7]], %[[MIDDLE_BLOCK]] ], [ 1023, %[[ENTRY]] ]
-; RV64-UF2-NEXT: br label %[[FOR_BODY:.*]]
-; RV64-UF2: [[FOR_BODY]]:
-; RV64-UF2-NEXT: [[DEC_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
-; RV64-UF2-NEXT: [[IV_NEXT]] = add nsw i64 [[DEC_IV]], -1
-; RV64-UF2-NEXT: [[ARRAYIDX_B:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV_NEXT]]
-; RV64-UF2-NEXT: [[TMP31:%.*]] = load float, ptr [[ARRAYIDX_B]], align 4
-; RV64-UF2-NEXT: [[FADD:%.*]] = fadd float [[TMP31]], 1.000000e+00
-; RV64-UF2-NEXT: [[ARRAYIDX_A:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV_NEXT]]
-; RV64-UF2-NEXT: store float [[FADD]], ptr [[ARRAYIDX_A]], align 4
-; RV64-UF2-NEXT: [[CMP:%.*]] = icmp ugt i64 [[DEC_IV]], 1
-; RV64-UF2-NEXT: br i1 [[CMP]], label %[[FOR_BODY]], label %[[EXIT]], !llvm.loop [[LOOP5:![0-9]+]]
-; RV64-UF2: [[EXIT]]:
-; RV64-UF2-NEXT: ret void
-;
-entry:
- br label %for.body
-
-for.body:
- %dec.iv = phi i64 [ 1023, %entry ], [ %iv.next, %for.body ]
- %iv.next = add nsw i64 %dec.iv, -1
- %arrayidx.b = getelementptr inbounds float, ptr %B, i64 %iv.next
- %0 = load float, ptr %arrayidx.b, align 4
- %fadd = fadd float %0, 1.000000e+00
- %arrayidx.a = getelementptr inbounds float, ptr %A, i64 %iv.next
- store float %fadd, ptr %arrayidx.a, align 4
- %cmp = icmp ugt i64 %dec.iv, 1
- br i1 %cmp, label %for.body, label %exit, !llvm.loop !0
-
-exit:
- ret void
-}
-
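The RV64-UF2 blocks unroll the same pattern by two: part 0 of each iteration ends at the scalar address and starts at -(VF - 1), while part 1 sits one whole vector group lower, at -VF - (VF - 1). A hedged sketch of the store side under the same assumptions; @reverse_store_uf2_sketch and its parameters are illustrative names, not from the patch:

define void @reverse_store_uf2_sketch(ptr %last, <vscale x 4 x float> %part0, <vscale x 4 x float> %part1, i64 %vf) {
  %vfm1 = sub i64 %vf, 1
  %off0 = mul i64 -1, %vfm1                                ; -(VF - 1)
  %addr0 = getelementptr inbounds float, ptr %last, i64 %off0
  %rev0 = call <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float> %part0)
  store <vscale x 4 x float> %rev0, ptr %addr0, align 4
  %negvf = mul i64 -1, %vf                                 ; step down one full group
  %group1 = getelementptr inbounds float, ptr %last, i64 %negvf
  %addr1 = getelementptr inbounds float, ptr %group1, i64 %off0
  %rev1 = call <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float> %part1)
  store <vscale x 4 x float> %rev1, ptr %addr1, align 4
  ret void
}
declare <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float>)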
-define void @vector_reverse_irregular_type(ptr noalias %A, ptr noalias %B) {
-; RV64-LABEL: define void @vector_reverse_irregular_type(
-; RV64-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]]) #[[ATTR0]] {
-; RV64-NEXT: [[ENTRY:.*]]:
-; RV64-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
-; RV64: [[VECTOR_PH]]:
-; RV64-NEXT: br label %[[VECTOR_BODY:.*]]
-; RV64: [[VECTOR_BODY]]:
-; RV64-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; RV64-NEXT: [[OFFSET_IDX:%.*]] = sub i64 1023, [[INDEX]]
-; RV64-NEXT: [[DEC_IV:%.*]] = add i64 [[OFFSET_IDX]], 0
-; RV64-NEXT: [[TMP1:%.*]] = add i64 [[OFFSET_IDX]], -1
-; RV64-NEXT: [[TMP2:%.*]] = add i64 [[OFFSET_IDX]], -2
-; RV64-NEXT: [[TMP3:%.*]] = add i64 [[OFFSET_IDX]], -3
-; RV64-NEXT: [[IV_NEXT:%.*]] = add nsw i64 [[DEC_IV]], -1
-; RV64-NEXT: [[TMP5:%.*]] = add nsw i64 [[TMP1]], -1
-; RV64-NEXT: [[TMP6:%.*]] = add nsw i64 [[TMP2]], -1
-; RV64-NEXT: [[TMP7:%.*]] = add nsw i64 [[TMP3]], -1
-; RV64-NEXT: [[ARRAYIDX_B:%.*]] = getelementptr inbounds i7, ptr [[B]], i64 [[IV_NEXT]]
-; RV64-NEXT: [[TMP9:%.*]] = getelementptr inbounds i7, ptr [[B]], i64 [[TMP5]]
-; RV64-NEXT: [[TMP10:%.*]] = getelementptr inbounds i7, ptr [[B]], i64 [[TMP6]]
-; RV64-NEXT: [[TMP11:%.*]] = getelementptr inbounds i7, ptr [[B]], i64 [[TMP7]]
-; RV64-NEXT: [[TMP0:%.*]] = load i7, ptr [[ARRAYIDX_B]], align 1
-; RV64-NEXT: [[TMP13:%.*]] = load i7, ptr [[TMP9]], align 1
-; RV64-NEXT: [[TMP14:%.*]] = load i7, ptr [[TMP10]], align 1
-; RV64-NEXT: [[TMP15:%.*]] = load i7, ptr [[TMP11]], align 1
-; RV64-NEXT: [[TMP16:%.*]] = insertelement <4 x i7> poison, i7 [[TMP0]], i32 0
-; RV64-NEXT: [[TMP17:%.*]] = insertelement <4 x i7> [[TMP16]], i7 [[TMP13]], i32 1
-; RV64-NEXT: [[TMP18:%.*]] = insertelement <4 x i7> [[TMP17]], i7 [[TMP14]], i32 2
-; RV64-NEXT: [[TMP19:%.*]] = insertelement <4 x i7> [[TMP18]], i7 [[TMP15]], i32 3
-; RV64-NEXT: [[TMP20:%.*]] = add <4 x i7> [[TMP19]], splat (i7 1)
-; RV64-NEXT: [[TMP21:%.*]] = getelementptr inbounds i7, ptr [[A]], i64 [[IV_NEXT]]
-; RV64-NEXT: [[TMP22:%.*]] = getelementptr inbounds i7, ptr [[A]], i64 [[TMP5]]
-; RV64-NEXT: [[TMP23:%.*]] = getelementptr inbounds i7, ptr [[A]], i64 [[TMP6]]
-; RV64-NEXT: [[TMP24:%.*]] = getelementptr inbounds i7, ptr [[A]], i64 [[TMP7]]
-; RV64-NEXT: [[TMP25:%.*]] = extractelement <4 x i7> [[TMP20]], i32 0
-; RV64-NEXT: store i7 [[TMP25]], ptr [[TMP21]], align 1
-; RV64-NEXT: [[TMP26:%.*]] = extractelement <4 x i7> [[TMP20]], i32 1
-; RV64-NEXT: store i7 [[TMP26]], ptr [[TMP22]], align 1
-; RV64-NEXT: [[TMP27:%.*]] = extractelement <4 x i7> [[TMP20]], i32 2
-; RV64-NEXT: store i7 [[TMP27]], ptr [[TMP23]], align 1
-; RV64-NEXT: [[TMP28:%.*]] = extractelement <4 x i7> [[TMP20]], i32 3
-; RV64-NEXT: store i7 [[TMP28]], ptr [[TMP24]], align 1
-; RV64-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
-; RV64-NEXT: [[TMP29:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1020
-; RV64-NEXT: br i1 [[TMP29]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
-; RV64: [[MIDDLE_BLOCK]]:
-; RV64-NEXT: br i1 false, label %[[EXIT:.*]], label %[[SCALAR_PH]]
-; RV64: [[SCALAR_PH]]:
-; RV64-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 3, %[[MIDDLE_BLOCK]] ], [ 1023, %[[ENTRY]] ]
-; RV64-NEXT: br label %[[FOR_BODY:.*]]
-; RV64: [[FOR_BODY]]:
-; RV64-NEXT: [[DEC_IV1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT1:%.*]], %[[FOR_BODY]] ]
-; RV64-NEXT: [[IV_NEXT1]] = add nsw i64 [[DEC_IV1]], -1
-; RV64-NEXT: [[ARRAYIDX_B1:%.*]] = getelementptr inbounds i7, ptr [[B]], i64 [[IV_NEXT1]]
-; RV64-NEXT: [[TMP30:%.*]] = load i7, ptr [[ARRAYIDX_B1]], align 1
-; RV64-NEXT: [[ADD:%.*]] = add i7 [[TMP30]], 1
-; RV64-NEXT: [[ARRAYIDX_A:%.*]] = getelementptr inbounds i7, ptr [[A]], i64 [[IV_NEXT1]]
-; RV64-NEXT: store i7 [[ADD]], ptr [[ARRAYIDX_A]], align 1
-; RV64-NEXT: [[CMP:%.*]] = icmp ugt i64 [[DEC_IV1]], 1
-; RV64-NEXT: br i1 [[CMP]], label %[[FOR_BODY]], label %[[EXIT]], !llvm.loop [[LOOP7:![0-9]+]]
-; RV64: [[EXIT]]:
-; RV64-NEXT: ret void
-;
-; RV32-LABEL: define void @vector_reverse_irregular_type(
-; RV32-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]]) #[[ATTR0]] {
-; RV32-NEXT: [[ENTRY:.*]]:
-; RV32-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
-; RV32: [[VECTOR_PH]]:
-; RV32-NEXT: br label %[[VECTOR_BODY:.*]]
-; RV32: [[VECTOR_BODY]]:
-; RV32-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; RV32-NEXT: [[OFFSET_IDX:%.*]] = sub i64 1023, [[INDEX]]
-; RV32-NEXT: [[DEC_IV:%.*]] = add i64 [[OFFSET_IDX]], 0
-; RV32-NEXT: [[TMP1:%.*]] = add i64 [[OFFSET_IDX]], -1
-; RV32-NEXT: [[TMP2:%.*]] = add i64 [[OFFSET_IDX]], -2
-; RV32-NEXT: [[TMP3:%.*]] = add i64 [[OFFSET_IDX]], -3
-; RV32-NEXT: [[IV_NEXT:%.*]] = add nsw i64 [[DEC_IV]], -1
-; RV32-NEXT: [[TMP5:%.*]] = add nsw i64 [[TMP1]], -1
-; RV32-NEXT: [[TMP6:%.*]] = add nsw i64 [[TMP2]], -1
-; RV32-NEXT: [[TMP7:%.*]] = add nsw i64 [[TMP3]], -1
-; RV32-NEXT: [[ARRAYIDX_B:%.*]] = getelementptr inbounds i7, ptr [[B]], i64 [[IV_NEXT]]
-; RV32-NEXT: [[TMP9:%.*]] = getelementptr inbounds i7, ptr [[B]], i64 [[TMP5]]
-; RV32-NEXT: [[TMP10:%.*]] = getelementptr inbounds i7, ptr [[B]], i64 [[TMP6]]
-; RV32-NEXT: [[TMP11:%.*]] = getelementptr inbounds i7, ptr [[B]], i64 [[TMP7]]
-; RV32-NEXT: [[TMP0:%.*]] = load i7, ptr [[ARRAYIDX_B]], align 1
-; RV32-NEXT: [[TMP13:%.*]] = load i7, ptr [[TMP9]], align 1
-; RV32-NEXT: [[TMP14:%.*]] = load i7, ptr [[TMP10]], align 1
-; RV32-NEXT: [[TMP15:%.*]] = load i7, ptr [[TMP11]], align 1
-; RV32-NEXT: [[TMP16:%.*]] = insertelement <4 x i7> poison, i7 [[TMP0]], i32 0
-; RV32-NEXT: [[TMP17:%.*]] = insertelement <4 x i7> [[TMP16]], i7 [[TMP13]], i32 1
-; RV32-NEXT: [[TMP18:%.*]] = insertelement <4 x i7> [[TMP17]], i7 [[TMP14]], i32 2
-; RV32-NEXT: [[TMP19:%.*]] = insertelement <4 x i7> [[TMP18]], i7 [[TMP15]], i32 3
-; RV32-NEXT: [[TMP20:%.*]] = add <4 x i7> [[TMP19]], splat (i7 1)
-; RV32-NEXT: [[TMP21:%.*]] = getelementptr inbounds i7, ptr [[A]], i64 [[IV_NEXT]]
-; RV32-NEXT: [[TMP22:%.*]] = getelementptr inbounds i7, ptr [[A]], i64 [[TMP5]]
-; RV32-NEXT: [[TMP23:%.*]] = getelementptr inbounds i7, ptr [[A]], i64 [[TMP6]]
-; RV32-NEXT: [[TMP24:%.*]] = getelementptr inbounds i7, ptr [[A]], i64 [[TMP7]]
-; RV32-NEXT: [[TMP25:%.*]] = extractelement <4 x i7> [[TMP20]], i32 0
-; RV32-NEXT: store i7 [[TMP25]], ptr [[TMP21]], align 1
-; RV32-NEXT: [[TMP26:%.*]] = extractelement <4 x i7> [[TMP20]], i32 1
-; RV32-NEXT: store i7 [[TMP26]], ptr [[TMP22]], align 1
-; RV32-NEXT: [[TMP27:%.*]] = extractelement <4 x i7> [[TMP20]], i32 2
-; RV32-NEXT: store i7 [[TMP27]], ptr [[TMP23]], align 1
-; RV32-NEXT: [[TMP28:%.*]] = extractelement <4 x i7> [[TMP20]], i32 3
-; RV32-NEXT: store i7 [[TMP28]], ptr [[TMP24]], align 1
-; RV32-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
-; RV32-NEXT: [[TMP29:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1020
-; RV32-NEXT: br i1 [[TMP29]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
-; RV32: [[MIDDLE_BLOCK]]:
-; RV32-NEXT: br i1 false, label %[[EXIT:.*]], label %[[SCALAR_PH]]
-; RV32: [[SCALAR_PH]]:
-; RV32-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 3, %[[MIDDLE_BLOCK]] ], [ 1023, %[[ENTRY]] ]
-; RV32-NEXT: br label %[[FOR_BODY:.*]]
-; RV32: [[FOR_BODY]]:
-; RV32-NEXT: [[DEC_IV1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT1:%.*]], %[[FOR_BODY]] ]
-; RV32-NEXT: [[IV_NEXT1]] = add nsw i64 [[DEC_IV1]], -1
-; RV32-NEXT: [[ARRAYIDX_B1:%.*]] = getelementptr inbounds i7, ptr [[B]], i64 [[IV_NEXT1]]
-; RV32-NEXT: [[TMP30:%.*]] = load i7, ptr [[ARRAYIDX_B1]], align 1
-; RV32-NEXT: [[ADD:%.*]] = add i7 [[TMP30]], 1
-; RV32-NEXT: [[ARRAYIDX_A:%.*]] = getelementptr inbounds i7, ptr [[A]], i64 [[IV_NEXT1]]
-; RV32-NEXT: store i7 [[ADD]], ptr [[ARRAYIDX_A]], align 1
-; RV32-NEXT: [[CMP:%.*]] = icmp ugt i64 [[DEC_IV1]], 1
-; RV32-NEXT: br i1 [[CMP]], label %[[FOR_BODY]], label %[[EXIT]], !llvm.loop [[LOOP7:![0-9]+]]
-; RV32: [[EXIT]]:
-; RV32-NEXT: ret void
-;
-; RV64-UF2-LABEL: define void @vector_reverse_irregular_type(
-; RV64-UF2-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]]) #[[ATTR0]] {
-; RV64-UF2-NEXT: [[ENTRY:.*]]:
-; RV64-UF2-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
-; RV64-UF2: [[VECTOR_PH]]:
-; RV64-UF2-NEXT: br label %[[VECTOR_BODY:.*]]
-; RV64-UF2: [[VECTOR_BODY]]:
-; RV64-UF2-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; RV64-UF2-NEXT: [[OFFSET_IDX:%.*]] = sub i64 1023, [[INDEX]]
-; RV64-UF2-NEXT: [[TMP16:%.*]] = add i64 [[OFFSET_IDX]], 0
-; RV64-UF2-NEXT: [[TMP0:%.*]] = add i64 [[OFFSET_IDX]], -1
-; RV64-UF2-NEXT: [[TMP17:%.*]] = add i64 [[OFFSET_IDX]], -2
-; RV64-UF2-NEXT: [[TMP24:%.*]] = add i64 [[OFFSET_IDX]], -3
-; RV64-UF2-NEXT: [[TMP25:%.*]] = add i64 [[OFFSET_IDX]], -4
-; RV64-UF2-NEXT: [[TMP42:%.*]] = add i64 [[OFFSET_IDX]], -5
-; RV64-UF2-NEXT: [[TMP43:%.*]] = add i64 [[OFFSET_IDX]], -6
-; RV64-UF2-NEXT: [[TMP50:%.*]] = add i64 [[OFFSET_IDX]], -7
-; RV64-UF2-NEXT: [[TMP1:%.*]] = add nsw i64 [[TMP16]], -1
-; RV64-UF2-NEXT: [[TMP2:%.*]] = add nsw i64 [[TMP0]], -1
-; RV64-UF2-NEXT: [[TMP51:%.*]] = add nsw i64 [[TMP17]], -1
-; RV64-UF2-NEXT: [[TMP11:%.*]] = add nsw i64 [[TMP24]], -1
-; RV64-UF2-NEXT: [[TMP59:%.*]] = add nsw i64 [[TMP25]], -1
-; RV64-UF2-NEXT: [[TMP13:%.*]] = add nsw i64 [[TMP42]], -1
-; RV64-UF2-NEXT: [[TMP14:%.*]] = add nsw i64 [[TMP43]], -1
-; RV64-UF2-NEXT: [[TMP15:%.*]] = add nsw i64 [[TMP50]], -1
-; RV64-UF2-NEXT: [[TMP3:%.*]] = getelementptr inbounds i7, ptr [[B]], i64 [[TMP1]]
-; RV64-UF2-NEXT: [[TMP4:%.*]] = getelementptr inbounds i7, ptr [[B]], i64 [[TMP2]]
-; RV64-UF2-NEXT: [[TMP18:%.*]] = getelementptr inbounds i7, ptr [[B]], i64 [[TMP51]]
-; RV64-UF2-NEXT: [[TMP19:%.*]] = getelementptr inbounds i7, ptr [[B]], i64 [[TMP11]]
-; RV64-UF2-NEXT: [[TMP20:%.*]] = getelementptr inbounds i7, ptr [[B]], i64 [[TMP59]]
-; RV64-UF2-NEXT: [[TMP21:%.*]] = getelementptr inbounds i7, ptr [[B]], i64 [[TMP13]]
-; RV64-UF2-NEXT: [[TMP22:%.*]] = getelementptr inbounds i7, ptr [[B]], i64 [[TMP14]]
-; RV64-UF2-NEXT: [[TMP23:%.*]] = getelementptr inbounds i7, ptr [[B]], i64 [[TMP15]]
-; RV64-UF2-NEXT: [[TMP5:%.*]] = load i7, ptr [[TMP3]], align 1
-; RV64-UF2-NEXT: [[TMP6:%.*]] = load i7, ptr [[TMP4]], align 1
-; RV64-UF2-NEXT: [[TMP26:%.*]] = load i7, ptr [[TMP18]], align 1
-; RV64-UF2-NEXT: [[TMP27:%.*]] = load i7, ptr [[TMP19]], align 1
-; RV64-UF2-NEXT: [[TMP28:%.*]] = insertelement <4 x i7> poison, i7 [[TMP5]], i32 0
-; RV64-UF2-NEXT: [[TMP29:%.*]] = insertelement <4 x i7> [[TMP28]], i7 [[TMP6]], i32 1
-; RV64-UF2-NEXT: [[TMP30:%.*]] = insertelement <4 x i7> [[TMP29]], i7 [[TMP26]], i32 2
-; RV64-UF2-NEXT: [[TMP31:%.*]] = insertelement <4 x i7> [[TMP30]], i7 [[TMP27]], i32 3
-; RV64-UF2-NEXT: [[TMP32:%.*]] = load i7, ptr [[TMP20]], align 1
-; RV64-UF2-NEXT: [[TMP33:%.*]] = load i7, ptr [[TMP21]], align 1
-; RV64-UF2-NEXT: [[TMP34:%.*]] = load i7, ptr [[TMP22]], align 1
-; RV64-UF2-NEXT: [[TMP35:%.*]] = load i7, ptr [[TMP23]], align 1
-; RV64-UF2-NEXT: [[TMP36:%.*]] = insertelement <4 x i7> poison, i7 [[TMP32]], i32 0
-; RV64-UF2-NEXT: [[TMP37:%.*]] = insertelement <4 x i7> [[TMP36]], i7 [[TMP33]], i32 1
-; RV64-UF2-NEXT: [[TMP38:%.*]] = insertelement <4 x i7> [[TMP37]], i7 [[TMP34]], i32 2
-; RV64-UF2-NEXT: [[TMP39:%.*]] = insertelement <4 x i7> [[TMP38]], i7 [[TMP35]], i32 3
-; RV64-UF2-NEXT: [[TMP40:%.*]] = add <4 x i7> [[TMP31]], splat (i7 1)
-; RV64-UF2-NEXT: [[TMP41:%.*]] = add <4 x i7> [[TMP39]], splat (i7 1)
-; RV64-UF2-NEXT: [[TMP9:%.*]] = getelementptr inbounds i7, ptr [[A]], i64 [[TMP1]]
-; RV64-UF2-NEXT: [[TMP10:%.*]] = getelementptr inbounds i7, ptr [[A]], i64 [[TMP2]]
-; RV64-UF2-NEXT: [[TMP44:%.*]] = getelementptr inbounds i7, ptr [[A]], i64 [[TMP51]]
-; RV64-UF2-NEXT: [[TMP45:%.*]] = getelementptr inbounds i7, ptr [[A]], i64 [[TMP11]]
-; RV64-UF2-NEXT: [[TMP46:%.*]] = getelementptr inbounds i7, ptr [[A]], i64 [[TMP59]]
-; RV64-UF2-NEXT: [[TMP47:%.*]] = getelementptr inbounds i7, ptr [[A]], i64 [[TMP13]]
-; RV64-UF2-NEXT: [[TMP48:%.*]] = getelementptr inbounds i7, ptr [[A]], i64 [[TMP14]]
-; RV64-UF2-NEXT: [[TMP49:%.*]] = getelementptr inbounds i7, ptr [[A]], i64 [[TMP15]]
-; RV64-UF2-NEXT: [[TMP7:%.*]] = extractelement <4 x i7> [[TMP40]], i32 0
-; RV64-UF2-NEXT: store i7 [[TMP7]], ptr [[TMP9]], align 1
-; RV64-UF2-NEXT: [[TMP8:%.*]] = extractelement <4 x i7> [[TMP40]], i32 1
-; RV64-UF2-NEXT: store i7 [[TMP8]], ptr [[TMP10]], align 1
-; RV64-UF2-NEXT: [[TMP52:%.*]] = extractelement <4 x i7> [[TMP40]], i32 2
-; RV64-UF2-NEXT: store i7 [[TMP52]], ptr [[TMP44]], align 1
-; RV64-UF2-NEXT: [[TMP53:%.*]] = extractelement <4 x i7> [[TMP40]], i32 3
-; RV64-UF2-NEXT: store i7 [[TMP53]], ptr [[TMP45]], align 1
-; RV64-UF2-NEXT: [[TMP54:%.*]] = extractelement <4 x i7> [[TMP41]], i32 0
-; RV64-UF2-NEXT: store i7 [[TMP54]], ptr [[TMP46]], align 1
-; RV64-UF2-NEXT: [[TMP55:%.*]] = extractelement <4 x i7> [[TMP41]], i32 1
-; RV64-UF2-NEXT: store i7 [[TMP55]], ptr [[TMP47]], align 1
-; RV64-UF2-NEXT: [[TMP56:%.*]] = extractelement <4 x i7> [[TMP41]], i32 2
-; RV64-UF2-NEXT: store i7 [[TMP56]], ptr [[TMP48]], align 1
-; RV64-UF2-NEXT: [[TMP57:%.*]] = extractelement <4 x i7> [[TMP41]], i32 3
-; RV64-UF2-NEXT: store i7 [[TMP57]], ptr [[TMP49]], align 1
-; RV64-UF2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
-; RV64-UF2-NEXT: [[TMP58:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1016
-; RV64-UF2-NEXT: br i1 [[TMP58]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
-; RV64-UF2: [[MIDDLE_BLOCK]]:
-; RV64-UF2-NEXT: br i1 false, label %[[EXIT:.*]], label %[[SCALAR_PH]]
-; RV64-UF2: [[SCALAR_PH]]:
-; RV64-UF2-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 7, %[[MIDDLE_BLOCK]] ], [ 1023, %[[ENTRY]] ]
-; RV64-UF2-NEXT: br label %[[FOR_BODY:.*]]
-; RV64-UF2: [[FOR_BODY]]:
-; RV64-UF2-NEXT: [[DEC_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
-; RV64-UF2-NEXT: [[IV_NEXT]] = add nsw i64 [[DEC_IV]], -1
-; RV64-UF2-NEXT: [[ARRAYIDX_B:%.*]] = getelementptr inbounds i7, ptr [[B]], i64 [[IV_NEXT]]
-; RV64-UF2-NEXT: [[TMP12:%.*]] = load i7, ptr [[ARRAYIDX_B]], align 1
-; RV64-UF2-NEXT: [[ADD:%.*]] = add i7 [[TMP12]], 1
-; RV64-UF2-NEXT: [[ARRAYIDX_A:%.*]] = getelementptr inbounds i7, ptr [[A]], i64 [[IV_NEXT]]
-; RV64-UF2-NEXT: store i7 [[ADD]], ptr [[ARRAYIDX_A]], align 1
-; RV64-UF2-NEXT: [[CMP:%.*]] = icmp ugt i64 [[DEC_IV]], 1
-; RV64-UF2-NEXT: br i1 [[CMP]], label %[[FOR_BODY]], label %[[EXIT]], !llvm.loop [[LOOP7:![0-9]+]]
-; RV64-UF2: [[EXIT]]:
-; RV64-UF2-NEXT: ret void
-;
-entry:
- br label %for.body
-
-for.body:
- %dec.iv = phi i64 [ 1023, %entry ], [ %iv.next, %for.body ]
- %iv.next = add nsw i64 %dec.iv, -1
- %arrayidx.b = getelementptr inbounds i7, ptr %B, i64 %iv.next
- %0 = load i7, ptr %arrayidx.b, align 1
- %add = add i7 %0, 1
- %arrayidx.a = getelementptr inbounds i7, ptr %A, i64 %iv.next
- store i7 %add, ptr %arrayidx.a, align 1
- %cmp = icmp ugt i64 %dec.iv, 1
- br i1 %cmp, label %for.body, label %exit, !llvm.loop !4
-
-exit:
- ret void
-}
-
-!0 = distinct !{!0, !1, !2, !3}
-!1 = !{!"llvm.loop.vectorize.width", i32 4}
-!2 = !{!"llvm.loop.vectorize.scalable.enable", i1 true}
-!3 = !{!"llvm.loop.vectorize.enable", i1 true}
-!4 = distinct !{!4, !1, !3}
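The metadata block above is what drives all three tests: !1 requests a vector width of 4, !2 makes that width scalable, !3 force-enables vectorization, and !4 (used by the irregular-type i7 test) omits the scalable flag, which is why those checks use a fixed <4 x i7> with trip counts 1020 (UF=1, resume at 3) and 1016 (UF=2, resume at 7). A minimal sketch of a loop latch carrying that metadata; @loop_metadata_sketch is an illustrative name and the body is reduced to just the induction variable:

define void @loop_metadata_sketch() {
entry:
  br label %loop
loop:
  %iv = phi i64 [ 1023, %entry ], [ %iv.next, %loop ]
  %iv.next = add nsw i64 %iv, -1
  %cmp = icmp ugt i64 %iv, 1
  br i1 %cmp, label %loop, label %exit, !llvm.loop !0   ; metadata on the latch branch
exit:
  ret void
}
!0 = distinct !{!0, !1, !2, !3}
!1 = !{!"llvm.loop.vectorize.width", i32 4}
!2 = !{!"llvm.loop.vectorize.scalable.enable", i1 true}
!3 = !{!"llvm.loop.vectorize.enable", i1 true}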
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/riscv-vector-reverse.ll b/llvm/test/Transforms/LoopVectorize/RISCV/riscv-vector-reverse.ll
index ad445c8..f59ab56 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/riscv-vector-reverse.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/riscv-vector-reverse.ll
@@ -1,400 +1,455 @@
-; NOTE: Assertions have been autogenerated by utils/update_analyze_test_checks.py UTC_ARGS: --version 5
-; This is the loop in c++ being vectorize in this file with
-;vector.reverse
-; #pragma clang loop vectorize_width(4, scalable)
-; for (int i = N-1; i >= 0; --i)
-; a[i] = b[i] + 1.0;
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals none --filter-out-after "for.body:" --version 5
+;; This is the loop in c++ being vectorized in this file with
+;; vector.reverse
+;; #pragma clang loop vectorize_width(4, scalable)
+;; for (int i = N-1; i >= 0; --i)
+;; a[i] = b[i] + 1.0;
-
-; REQUIRES: asserts
-; RUN: opt -passes=loop-vectorize,dce,instcombine -mtriple riscv64-linux-gnu \
-; RUN: -mattr=+v -debug-only=loop-vectorize,vplan -scalable-vectorization=on \
-; RUN: -disable-output < %s 2>&1 | FileCheck %s
+; RUN: opt -passes=loop-vectorize -mtriple=riscv64 -mattr=+v -S < %s \
+; RUN: | FileCheck --check-prefix=RV64 %s
+
+; RUN: opt -passes=loop-vectorize -mtriple=riscv32 -mattr=+v -S < %s \
+; RUN: | FileCheck --check-prefix=RV32 %s
+
+; RUN: opt -passes=loop-vectorize -mtriple=riscv64 -mattr=+v \
+; RUN: -force-vector-interleave=2 -S < %s \
+; RUN: | FileCheck --check-prefix=RV64-UF2 %s
+
+define void @vector_reverse_i32(ptr noalias %A, ptr noalias %B) {
+; RV64-LABEL: define void @vector_reverse_i32(
+; RV64-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]]) #[[ATTR0:[0-9]+]] {
+; RV64-NEXT: [[ENTRY:.*]]:
+; RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; RV64-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
+; RV64-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1023, [[TMP1]]
+; RV64-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; RV64: [[VECTOR_PH]]:
+; RV64-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; RV64-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
+; RV64-NEXT: [[N_MOD_VF:%.*]] = urem i64 1023, [[TMP3]]
+; RV64-NEXT: [[N_VEC:%.*]] = sub i64 1023, [[N_MOD_VF]]
+; RV64-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
+; RV64-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
+; RV64-NEXT: [[TMP6:%.*]] = sub i64 1023, [[N_VEC]]
+; RV64-NEXT: br label %[[VECTOR_BODY:.*]]
+; RV64: [[VECTOR_BODY]]:
+; RV64-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; RV64-NEXT: [[OFFSET_IDX:%.*]] = sub i64 1023, [[INDEX]]
+; RV64-NEXT: [[TMP7:%.*]] = add nsw i64 [[OFFSET_IDX]], -1
+; RV64-NEXT: [[TMP8:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[TMP7]]
+; RV64-NEXT: [[TMP9:%.*]] = mul i64 0, [[TMP5]]
+; RV64-NEXT: [[TMP10:%.*]] = sub i64 [[TMP5]], 1
+; RV64-NEXT: [[TMP11:%.*]] = mul i64 -1, [[TMP10]]
+; RV64-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[TMP8]], i64 [[TMP9]]
+; RV64-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, ptr [[TMP12]], i64 [[TMP11]]
+; RV64-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP13]], align 4
+; RV64-NEXT: [[REVERSE:%.*]] = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[WIDE_LOAD]])
+; RV64-NEXT: [[TMP14:%.*]] = add <vscale x 4 x i32> [[REVERSE]], splat (i32 1)
+; RV64-NEXT: [[TMP15:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP7]]
+; RV64-NEXT: [[TMP16:%.*]] = mul i64 0, [[TMP5]]
+; RV64-NEXT: [[TMP17:%.*]] = sub i64 [[TMP5]], 1
+; RV64-NEXT: [[TMP18:%.*]] = mul i64 -1, [[TMP17]]
+; RV64-NEXT: [[TMP19:%.*]] = getelementptr inbounds i32, ptr [[TMP15]], i64 [[TMP16]]
+; RV64-NEXT: [[TMP20:%.*]] = getelementptr inbounds i32, ptr [[TMP19]], i64 [[TMP18]]
+; RV64-NEXT: [[REVERSE1:%.*]] = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[TMP14]])
+; RV64-NEXT: store <vscale x 4 x i32> [[REVERSE1]], ptr [[TMP20]], align 4
+; RV64-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; RV64-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; RV64-NEXT: br i1 [[TMP21]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; RV64: [[MIDDLE_BLOCK]]:
+; RV64-NEXT: [[CMP_N:%.*]] = icmp eq i64 1023, [[N_VEC]]
+; RV64-NEXT: br i1 [[CMP_N]], [[EXIT:label %.*]], label %[[SCALAR_PH]]
+; RV64: [[SCALAR_PH]]:
+; RV64-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP6]], %[[MIDDLE_BLOCK]] ], [ 1023, %[[ENTRY]] ]
+; RV64-NEXT: br label %[[FOR_BODY:.*]]
+; RV64: [[FOR_BODY]]:
+;
+; RV32-LABEL: define void @vector_reverse_i32(
+; RV32-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]]) #[[ATTR0:[0-9]+]] {
+; RV32-NEXT: [[ENTRY:.*]]:
+; RV32-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; RV32-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
+; RV32-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1023, [[TMP1]]
+; RV32-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; RV32: [[VECTOR_PH]]:
+; RV32-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; RV32-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
+; RV32-NEXT: [[N_MOD_VF:%.*]] = urem i64 1023, [[TMP3]]
+; RV32-NEXT: [[N_VEC:%.*]] = sub i64 1023, [[N_MOD_VF]]
+; RV32-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
+; RV32-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
+; RV32-NEXT: [[TMP6:%.*]] = sub i64 1023, [[N_VEC]]
+; RV32-NEXT: br label %[[VECTOR_BODY:.*]]
+; RV32: [[VECTOR_BODY]]:
+; RV32-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; RV32-NEXT: [[OFFSET_IDX:%.*]] = sub i64 1023, [[INDEX]]
+; RV32-NEXT: [[TMP7:%.*]] = add nsw i64 [[OFFSET_IDX]], -1
+; RV32-NEXT: [[TMP8:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[TMP7]]
+; RV32-NEXT: [[TMP9:%.*]] = trunc i64 [[TMP5]] to i32
+; RV32-NEXT: [[TMP10:%.*]] = mul i32 0, [[TMP9]]
+; RV32-NEXT: [[TMP11:%.*]] = sub i32 [[TMP9]], 1
+; RV32-NEXT: [[TMP12:%.*]] = mul i32 -1, [[TMP11]]
+; RV32-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, ptr [[TMP8]], i32 [[TMP10]]
+; RV32-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, ptr [[TMP13]], i32 [[TMP12]]
+; RV32-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP14]], align 4
+; RV32-NEXT: [[REVERSE:%.*]] = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[WIDE_LOAD]])
+; RV32-NEXT: [[TMP15:%.*]] = add <vscale x 4 x i32> [[REVERSE]], splat (i32 1)
+; RV32-NEXT: [[TMP16:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP7]]
+; RV32-NEXT: [[TMP17:%.*]] = trunc i64 [[TMP5]] to i32
+; RV32-NEXT: [[TMP18:%.*]] = mul i32 0, [[TMP17]]
+; RV32-NEXT: [[TMP19:%.*]] = sub i32 [[TMP17]], 1
+; RV32-NEXT: [[TMP20:%.*]] = mul i32 -1, [[TMP19]]
+; RV32-NEXT: [[TMP21:%.*]] = getelementptr inbounds i32, ptr [[TMP16]], i32 [[TMP18]]
+; RV32-NEXT: [[TMP22:%.*]] = getelementptr inbounds i32, ptr [[TMP21]], i32 [[TMP20]]
+; RV32-NEXT: [[REVERSE1:%.*]] = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[TMP15]])
+; RV32-NEXT: store <vscale x 4 x i32> [[REVERSE1]], ptr [[TMP22]], align 4
+; RV32-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; RV32-NEXT: [[TMP23:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; RV32-NEXT: br i1 [[TMP23]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; RV32: [[MIDDLE_BLOCK]]:
+; RV32-NEXT: [[CMP_N:%.*]] = icmp eq i64 1023, [[N_VEC]]
+; RV32-NEXT: br i1 [[CMP_N]], [[EXIT:label %.*]], label %[[SCALAR_PH]]
+; RV32: [[SCALAR_PH]]:
+; RV32-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP6]], %[[MIDDLE_BLOCK]] ], [ 1023, %[[ENTRY]] ]
+; RV32-NEXT: br label %[[FOR_BODY:.*]]
+; RV32: [[FOR_BODY]]:
+;
+; RV64-UF2-LABEL: define void @vector_reverse_i32(
+; RV64-UF2-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]]) #[[ATTR0:[0-9]+]] {
+; RV64-UF2-NEXT: [[ENTRY:.*]]:
+; RV64-UF2-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; RV64-UF2-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 8
+; RV64-UF2-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1023, [[TMP1]]
+; RV64-UF2-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; RV64-UF2: [[VECTOR_PH]]:
+; RV64-UF2-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; RV64-UF2-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8
+; RV64-UF2-NEXT: [[N_MOD_VF:%.*]] = urem i64 1023, [[TMP3]]
+; RV64-UF2-NEXT: [[N_VEC:%.*]] = sub i64 1023, [[N_MOD_VF]]
+; RV64-UF2-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
+; RV64-UF2-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
+; RV64-UF2-NEXT: [[TMP6:%.*]] = mul i64 [[TMP5]], 2
+; RV64-UF2-NEXT: [[TMP7:%.*]] = sub i64 1023, [[N_VEC]]
+; RV64-UF2-NEXT: br label %[[VECTOR_BODY:.*]]
+; RV64-UF2: [[VECTOR_BODY]]:
+; RV64-UF2-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; RV64-UF2-NEXT: [[OFFSET_IDX:%.*]] = sub i64 1023, [[INDEX]]
+; RV64-UF2-NEXT: [[TMP8:%.*]] = add nsw i64 [[OFFSET_IDX]], -1
+; RV64-UF2-NEXT: [[TMP9:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[TMP8]]
+; RV64-UF2-NEXT: [[TMP10:%.*]] = mul i64 0, [[TMP5]]
+; RV64-UF2-NEXT: [[TMP11:%.*]] = sub i64 [[TMP5]], 1
+; RV64-UF2-NEXT: [[TMP12:%.*]] = mul i64 -1, [[TMP11]]
+; RV64-UF2-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, ptr [[TMP9]], i64 [[TMP10]]
+; RV64-UF2-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, ptr [[TMP13]], i64 [[TMP12]]
+; RV64-UF2-NEXT: [[TMP15:%.*]] = mul i64 -1, [[TMP5]]
+; RV64-UF2-NEXT: [[TMP16:%.*]] = sub i64 [[TMP5]], 1
+; RV64-UF2-NEXT: [[TMP17:%.*]] = mul i64 -1, [[TMP16]]
+; RV64-UF2-NEXT: [[TMP18:%.*]] = getelementptr inbounds i32, ptr [[TMP9]], i64 [[TMP15]]
+; RV64-UF2-NEXT: [[TMP19:%.*]] = getelementptr inbounds i32, ptr [[TMP18]], i64 [[TMP17]]
+; RV64-UF2-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP14]], align 4
+; RV64-UF2-NEXT: [[REVERSE:%.*]] = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[WIDE_LOAD]])
+; RV64-UF2-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 4 x i32>, ptr [[TMP19]], align 4
+; RV64-UF2-NEXT: [[REVERSE2:%.*]] = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[WIDE_LOAD1]])
+; RV64-UF2-NEXT: [[TMP20:%.*]] = add <vscale x 4 x i32> [[REVERSE]], splat (i32 1)
+; RV64-UF2-NEXT: [[TMP21:%.*]] = add <vscale x 4 x i32> [[REVERSE2]], splat (i32 1)
+; RV64-UF2-NEXT: [[TMP22:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP8]]
+; RV64-UF2-NEXT: [[TMP23:%.*]] = mul i64 0, [[TMP5]]
+; RV64-UF2-NEXT: [[TMP24:%.*]] = sub i64 [[TMP5]], 1
+; RV64-UF2-NEXT: [[TMP25:%.*]] = mul i64 -1, [[TMP24]]
+; RV64-UF2-NEXT: [[TMP26:%.*]] = getelementptr inbounds i32, ptr [[TMP22]], i64 [[TMP23]]
+; RV64-UF2-NEXT: [[TMP27:%.*]] = getelementptr inbounds i32, ptr [[TMP26]], i64 [[TMP25]]
+; RV64-UF2-NEXT: [[TMP28:%.*]] = mul i64 -1, [[TMP5]]
+; RV64-UF2-NEXT: [[TMP29:%.*]] = sub i64 [[TMP5]], 1
+; RV64-UF2-NEXT: [[TMP30:%.*]] = mul i64 -1, [[TMP29]]
+; RV64-UF2-NEXT: [[TMP31:%.*]] = getelementptr inbounds i32, ptr [[TMP22]], i64 [[TMP28]]
+; RV64-UF2-NEXT: [[TMP32:%.*]] = getelementptr inbounds i32, ptr [[TMP31]], i64 [[TMP30]]
+; RV64-UF2-NEXT: [[REVERSE3:%.*]] = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[TMP20]])
+; RV64-UF2-NEXT: store <vscale x 4 x i32> [[REVERSE3]], ptr [[TMP27]], align 4
+; RV64-UF2-NEXT: [[REVERSE4:%.*]] = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[TMP21]])
+; RV64-UF2-NEXT: store <vscale x 4 x i32> [[REVERSE4]], ptr [[TMP32]], align 4
+; RV64-UF2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]]
+; RV64-UF2-NEXT: [[TMP33:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; RV64-UF2-NEXT: br i1 [[TMP33]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; RV64-UF2: [[MIDDLE_BLOCK]]:
+; RV64-UF2-NEXT: [[CMP_N:%.*]] = icmp eq i64 1023, [[N_VEC]]
+; RV64-UF2-NEXT: br i1 [[CMP_N]], [[EXIT:label %.*]], label %[[SCALAR_PH]]
+; RV64-UF2: [[SCALAR_PH]]:
+; RV64-UF2-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP7]], %[[MIDDLE_BLOCK]] ], [ 1023, %[[ENTRY]] ]
+; RV64-UF2-NEXT: br label %[[FOR_BODY:.*]]
+; RV64-UF2: [[FOR_BODY]]:
+;
+entry:
+ br label %for.body
+
+for.body:
+ %dec.iv = phi i64 [ 1023, %entry ], [ %iv.next, %for.body ]
+ %iv.next = add nsw i64 %dec.iv, -1
+ %arrayidx.b = getelementptr inbounds i32, ptr %B, i64 %iv.next
+ %0 = load i32, ptr %arrayidx.b, align 4
+ %add = add i32 %0, 1
+ %arrayidx.a = getelementptr inbounds i32, ptr %A, i64 %iv.next
+ store i32 %add, ptr %arrayidx.a, align 4
+ %cmp = icmp ugt i64 %dec.iv, 1
+ br i1 %cmp, label %for.body, label %exit, !llvm.loop !0
+
+exit:
+ ret void
+}
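The new checks compute the vector trip count from the runtime VF rather than from a constant: N_MOD_VF = 1023 urem (vscale * 4), N_VEC = 1023 - N_MOD_VF, and the scalar loop resumes at 1023 - N_VEC. As a worked example, assuming vscale = 2 (so VF = 8): 1023 urem 8 = 7, hence N_VEC = 1016 and BC_RESUME_VAL = 7, leaving seven epilogue iterations; for RV64-UF2 the same split applies, only the per-iteration step doubles to 2 * VF.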
CHECK-NEXT: LV: Found an induction variable. -; CHECK-NEXT: LV: Found an induction variable. -; CHECK-NEXT: LV: Did not find one integer induction var. -; CHECK-NEXT: LV: We can vectorize this loop (with a runtime bound check)! -; CHECK-NEXT: LV: Loop does not require scalar epilogue -; CHECK-NEXT: LV: Found trip count: 0 -; CHECK-NEXT: LV: Found maximum trip count: 4294967295 -; CHECK-NEXT: LV: Scalable vectorization is available -; CHECK-NEXT: LV: The max safe fixed VF is: 67108864. -; CHECK-NEXT: LV: The max safe scalable VF is: vscale x 4294967295. -; CHECK-NEXT: LV: Found uniform instruction: %cmp = icmp ugt i64 %indvars.iv, 1 -; CHECK-NEXT: LV: Found uniform instruction: %arrayidx = getelementptr inbounds i32, ptr %B, i64 %idxprom -; CHECK-NEXT: LV: Found uniform instruction: %arrayidx3 = getelementptr inbounds i32, ptr %A, i64 %idxprom -; CHECK-NEXT: LV: Found uniform instruction: %idxprom = zext i32 %i.0 to i64 -; CHECK-NEXT: LV: Found uniform instruction: %idxprom = zext i32 %i.0 to i64 -; CHECK-NEXT: LV: Found uniform instruction: %indvars.iv = phi i64 [ %0, %for.body.preheader ], [ %indvars.iv.next, %for.body ] -; CHECK-NEXT: LV: Found uniform instruction: %indvars.iv.next = add nsw i64 %indvars.iv, -1 -; CHECK-NEXT: LV: Found uniform instruction: %i.0.in8 = phi i32 [ %n, %for.body.preheader ], [ %i.0, %for.body ] -; CHECK-NEXT: LV: Found uniform instruction: %i.0 = add nsw i32 %i.0.in8, -1 -; CHECK-NEXT: LV: Found an estimated cost of 0 for VF vscale x 4 For instruction: %indvars.iv = phi i64 [ %0, %for.body.preheader ], [ %indvars.iv.next, %for.body ] -; CHECK-NEXT: LV: Found an estimated cost of 0 for VF vscale x 4 For instruction: %i.0.in8 = phi i32 [ %n, %for.body.preheader ], [ %i.0, %for.body ] -; CHECK-NEXT: LV: Found an estimated cost of 1 for VF vscale x 4 For instruction: %i.0 = add nsw i32 %i.0.in8, -1 -; CHECK-NEXT: LV: Found an estimated cost of 1 for VF vscale x 4 For instruction: %idxprom = zext i32 %i.0 to i64 -; CHECK-NEXT: LV: Found an estimated cost of 0 for VF vscale x 4 For instruction: %arrayidx = getelementptr inbounds i32, ptr %B, i64 %idxprom -; CHECK-NEXT: LV: Found an estimated cost of 9 for VF vscale x 4 For instruction: %1 = load i32, ptr %arrayidx, align 4 -; CHECK-NEXT: LV: Found an estimated cost of 2 for VF vscale x 4 For instruction: %add9 = add i32 %1, 1 -; CHECK-NEXT: LV: Found an estimated cost of 0 for VF vscale x 4 For instruction: %arrayidx3 = getelementptr inbounds i32, ptr %A, i64 %idxprom -; CHECK-NEXT: LV: Found an estimated cost of 9 for VF vscale x 4 For instruction: store i32 %add9, ptr %arrayidx3, align 4 -; CHECK-NEXT: LV: Found an estimated cost of 1 for VF vscale x 4 For instruction: %cmp = icmp ugt i64 %indvars.iv, 1 -; CHECK-NEXT: LV: Found an estimated cost of 1 for VF vscale x 4 For instruction: %indvars.iv.next = add nsw i64 %indvars.iv, -1 -; CHECK-NEXT: LV: Found an estimated cost of 0 for VF vscale x 4 For instruction: br i1 %cmp, label %for.body, label %for.cond.cleanup.loopexit, !llvm.loop !0 -; CHECK-NEXT: LV: Using user VF vscale x 4. 
-; CHECK-NEXT: Creating VPBasicBlock for for.body -; CHECK-NEXT: VPlan 'Plain CFG -; CHECK-NEXT: for UF>=1' { -; CHECK-EMPTY: -; CHECK-NEXT: ir-bb<for.body.preheader>: -; CHECK-NEXT: IR %0 = zext i32 %n to i64 -; CHECK-NEXT: Successor(s): for.body -; CHECK-EMPTY: -; CHECK-NEXT: for.body: -; CHECK-NEXT: WIDEN-PHI ir<%indvars.iv> = phi [ ir<%indvars.iv.next>, for.body ], [ ir<%0>, ir-bb<for.body.preheader> ] -; CHECK-NEXT: WIDEN-PHI ir<%i.0.in8> = phi [ ir<%i.0>, for.body ], [ ir<%n>, ir-bb<for.body.preheader> ] -; CHECK-NEXT: EMIT ir<%i.0> = add ir<%i.0.in8>, ir<-1> -; CHECK-NEXT: EMIT-SCALAR ir<%idxprom> = zext ir<%i.0> -; CHECK-NEXT: EMIT ir<%arrayidx> = getelementptr ir<%B>, ir<%idxprom> -; CHECK-NEXT: EMIT ir<%1> = load ir<%arrayidx> -; CHECK-NEXT: EMIT ir<%add9> = add ir<%1>, ir<1> -; CHECK-NEXT: EMIT ir<%arrayidx3> = getelementptr ir<%A>, ir<%idxprom> -; CHECK-NEXT: EMIT store ir<%add9>, ir<%arrayidx3> -; CHECK-NEXT: EMIT ir<%cmp> = icmp ir<%indvars.iv>, ir<1> -; CHECK-NEXT: EMIT ir<%indvars.iv.next> = add ir<%indvars.iv>, ir<-1> -; CHECK-NEXT: EMIT branch-on-cond ir<%cmp> -; CHECK-NEXT: Successor(s): for.body, ir-bb<for.cond.cleanup.loopexit> -; CHECK-EMPTY: -; CHECK-NEXT: ir-bb<for.cond.cleanup.loopexit>: -; CHECK-NEXT: No successors -; CHECK-NEXT: } -; CHECK-NEXT: LV: Loop does not require scalar epilogue -; CHECK-NEXT: LV: Scalarizing: %i.0 = add nsw i32 %i.0.in8, -1 -; CHECK-NEXT: LV: Scalarizing: %idxprom = zext i32 %i.0 to i64 -; CHECK-NEXT: LV: Scalarizing: %arrayidx = getelementptr inbounds i32, ptr %B, i64 %idxprom -; CHECK-NEXT: LV: Scalarizing: %arrayidx3 = getelementptr inbounds i32, ptr %A, i64 %idxprom -; CHECK-NEXT: LV: Scalarizing: %cmp = icmp ugt i64 %indvars.iv, 1 -; CHECK-NEXT: LV: Scalarizing: %indvars.iv.next = add nsw i64 %indvars.iv, -1 -; CHECK-NEXT: VPlan 'Initial VPlan for VF={vscale x 4},UF>=1' { -; CHECK-NEXT: Live-in vp<%0> = VF -; CHECK-NEXT: Live-in vp<%1> = VF * UF -; CHECK-NEXT: Live-in vp<%2> = vector-trip-count -; CHECK-NEXT: vp<%3> = original trip-count -; CHECK-EMPTY: -; CHECK-NEXT: ir-bb<for.body.preheader>: -; CHECK-NEXT: IR %0 = zext i32 %n to i64 -; CHECK-NEXT: EMIT vp<%3> = EXPAND SCEV (zext i32 %n to i64) -; CHECK-NEXT: Successor(s): scalar.ph, vector.ph -; CHECK-EMPTY: -; CHECK-NEXT: vector.ph: -; CHECK-NEXT: vp<%4> = DERIVED-IV ir<%0> + vp<%2> * ir<-1> -; CHECK-NEXT: vp<%5> = DERIVED-IV ir<%n> + vp<%2> * ir<-1> -; CHECK-NEXT: Successor(s): vector loop -; CHECK-EMPTY: -; CHECK-NEXT: <x1> vector loop: { -; CHECK-NEXT: vector.body: -; CHECK-NEXT: EMIT vp<%6> = CANONICAL-INDUCTION ir<0>, vp<%index.next> -; CHECK-NEXT: vp<%7> = DERIVED-IV ir<%n> + vp<%6> * ir<-1> -; CHECK-NEXT: vp<%8> = SCALAR-STEPS vp<%7>, ir<-1>, vp<%0> -; CHECK-NEXT: CLONE ir<%i.0> = add nsw vp<%8>, ir<-1> -; CHECK-NEXT: CLONE ir<%idxprom> = zext ir<%i.0> -; CHECK-NEXT: CLONE ir<%arrayidx> = getelementptr inbounds ir<%B>, ir<%idxprom> -; CHECK-NEXT: vp<%9> = vector-end-pointer inbounds ir<%arrayidx>, vp<%0> -; CHECK-NEXT: WIDEN ir<%1> = load vp<%9> -; CHECK-NEXT: WIDEN ir<%add9> = add ir<%1>, ir<1> -; CHECK-NEXT: CLONE ir<%arrayidx3> = getelementptr inbounds ir<%A>, ir<%idxprom> -; CHECK-NEXT: vp<%10> = vector-end-pointer inbounds ir<%arrayidx3>, vp<%0> -; CHECK-NEXT: WIDEN store vp<%10>, ir<%add9> -; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<%6>, vp<%1> -; CHECK-NEXT: EMIT branch-on-count vp<%index.next>, vp<%2> -; CHECK-NEXT: No successors -; CHECK-NEXT: } -; CHECK-NEXT: Successor(s): middle.block -; CHECK-EMPTY: -; CHECK-NEXT: middle.block: -; CHECK-NEXT: EMIT 
vp<%cmp.n> = icmp eq vp<%3>, vp<%2>
-; CHECK-NEXT: EMIT branch-on-cond vp<%cmp.n>
-; CHECK-NEXT: Successor(s): ir-bb<for.cond.cleanup.loopexit>, scalar.ph
-; CHECK-EMPTY:
-; CHECK-NEXT: ir-bb<for.cond.cleanup.loopexit>:
-; CHECK-NEXT: No successors
-; CHECK-EMPTY:
-; CHECK-NEXT: scalar.ph:
-; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val> = phi [ vp<%4>, middle.block ], [ ir<%0>, ir-bb<for.body.preheader> ]
-; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val>.1 = phi [ vp<%5>, middle.block ], [ ir<%n>, ir-bb<for.body.preheader> ]
-; CHECK-NEXT: Successor(s): ir-bb<for.body>
-; CHECK-EMPTY:
-; CHECK-NEXT: ir-bb<for.body>:
-; CHECK-NEXT: IR %indvars.iv = phi i64 [ %0, %for.body.preheader ], [ %indvars.iv.next, %for.body ] (extra operand: vp<%bc.resume.val> from scalar.ph)
-; CHECK-NEXT: IR %i.0.in8 = phi i32 [ %n, %for.body.preheader ], [ %i.0, %for.body ] (extra operand: vp<%bc.resume.val>.1 from scalar.ph)
-; CHECK-NEXT: IR %i.0 = add nsw i32 %i.0.in8, -1
-; CHECK-NEXT: IR %idxprom = zext i32 %i.0 to i64
-; CHECK-NEXT: IR %arrayidx = getelementptr inbounds i32, ptr %B, i64 %idxprom
-; CHECK-NEXT: IR %1 = load i32, ptr %arrayidx, align 4
-; CHECK-NEXT: IR %add9 = add i32 %1, 1
-; CHECK-NEXT: IR %arrayidx3 = getelementptr inbounds i32, ptr %A, i64 %idxprom
-; CHECK-NEXT: IR store i32 %add9, ptr %arrayidx3, align 4
-; CHECK-NEXT: IR %cmp = icmp ugt i64 %indvars.iv, 1
-; CHECK-NEXT: IR %indvars.iv.next = add nsw i64 %indvars.iv, -1
-; CHECK-NEXT: No successors
-; CHECK-NEXT: }
-; CHECK-NEXT: LV: Found an estimated cost of 0 for VF vscale x 4 For instruction: %indvars.iv = phi i64 [ %0, %for.body.preheader ], [ %indvars.iv.next, %for.body ]
-; CHECK-NEXT: LV: Found an estimated cost of 0 for VF vscale x 4 For instruction: %i.0.in8 = phi i32 [ %n, %for.body.preheader ], [ %i.0, %for.body ]
-; CHECK-NEXT: LV: Found an estimated cost of 1 for VF vscale x 4 For instruction: %i.0 = add nsw i32 %i.0.in8, -1
-; CHECK-NEXT: LV: Found an estimated cost of 1 for VF vscale x 4 For instruction: %idxprom = zext i32 %i.0 to i64
-; CHECK-NEXT: LV: Found an estimated cost of 0 for VF vscale x 4 For instruction: %arrayidx = getelementptr inbounds i32, ptr %B, i64 %idxprom
-; CHECK-NEXT: LV: Found an estimated cost of 9 for VF vscale x 4 For instruction: %1 = load i32, ptr %arrayidx, align 4
-; CHECK-NEXT: LV: Found an estimated cost of 2 for VF vscale x 4 For instruction: %add9 = add i32 %1, 1
-; CHECK-NEXT: LV: Found an estimated cost of 0 for VF vscale x 4 For instruction: %arrayidx3 = getelementptr inbounds i32, ptr %A, i64 %idxprom
-; CHECK-NEXT: LV: Found an estimated cost of 9 for VF vscale x 4 For instruction: store i32 %add9, ptr %arrayidx3, align 4
-; CHECK-NEXT: LV: Found an estimated cost of 1 for VF vscale x 4 For instruction: %cmp = icmp ugt i64 %indvars.iv, 1
-; CHECK-NEXT: LV: Found an estimated cost of 1 for VF vscale x 4 For instruction: %indvars.iv.next = add nsw i64 %indvars.iv, -1
-; CHECK-NEXT: LV: Found an estimated cost of 0 for VF vscale x 4 For instruction: br i1 %cmp, label %for.body, label %for.cond.cleanup.loopexit, !llvm.loop !0
-; CHECK-NEXT: LV(REG): Calculating max register usage:
-; CHECK-NEXT: LV(REG): At #0 Interval # 0
-; CHECK-NEXT: LV(REG): At #1 Interval # 1
-; CHECK-NEXT: LV(REG): At #2 Interval # 2
-; CHECK-NEXT: LV(REG): At #3 Interval # 2
-; CHECK-NEXT: LV(REG): At #4 Interval # 2
-; CHECK-NEXT: LV(REG): At #5 Interval # 2
-; CHECK-NEXT: LV(REG): At #6 Interval # 3
-; CHECK-NEXT: LV(REG): At #7 Interval # 3
-; CHECK-NEXT: LV(REG): At #8 Interval # 3
-; CHECK-NEXT: LV(REG): At #9 Interval # 3
-; CHECK-NEXT: LV(REG): At #10 Interval # 3
-; CHECK-NEXT: LV(REG): At #11 Interval # 3
-; CHECK-NEXT: LV(REG): At #12 Interval # 2
-; CHECK-NEXT: LV(REG): At #13 Interval # 2
-; CHECK-NEXT: LV(REG): VF = vscale x 4
-; CHECK-NEXT: LV(REG): Found max usage: 2 item
-; CHECK-NEXT: LV(REG): RegisterClass: RISCV::GPRRC, 3 registers
-; CHECK-NEXT: LV(REG): RegisterClass: RISCV::VRRC, 2 registers
-; CHECK-NEXT: LV(REG): Found invariant usage: 1 item
-; CHECK-NEXT: LV(REG): RegisterClass: RISCV::GPRRC, 1 registers
-; CHECK-NEXT: LV: The target has 31 registers of RISCV::GPRRC register class
-; CHECK-NEXT: LV: The target has 32 registers of RISCV::VRRC register class
-; CHECK-NEXT: LV: Loop does not require scalar epilogue
-; CHECK-NEXT: LV: Loop cost is 24
-; CHECK-NEXT: LV: IC is 1
-; CHECK-NEXT: LV: VF is vscale x 4
-; CHECK-NEXT: LV: Not Interleaving.
-; CHECK-NEXT: LV: Interleaving is not beneficial.
-; CHECK-NEXT: LV: Found a vectorizable loop (vscale x 4) in <stdin>
-; CHECK-NEXT: LEV: Epilogue vectorization is not profitable for this loop
-; CHECK-NEXT: LV: Loop does not require scalar epilogue
-; CHECK-NEXT: LV: Loop does not require scalar epilogue
-; CHECK-NEXT: Executing best plan with VF=vscale x 4, UF=1
-; CHECK-NEXT: VPlan 'Final VPlan for VF={vscale x 4},UF={1}' {
-; CHECK-NEXT: Live-in ir<%18> = VF
-; CHECK-NEXT: Live-in ir<%18>.1 = VF * UF
-; CHECK-NEXT: Live-in ir<%n.vec> = vector-trip-count
-; CHECK-NEXT: Live-in ir<%0> = original trip-count
-; CHECK-EMPTY:
-; CHECK-NEXT: ir-bb<for.body.preheader>:
-; CHECK-NEXT: IR %0 = zext i32 %n to i64
-; CHECK-NEXT: Successor(s): ir-bb<scalar.ph>, ir-bb<vector.scevcheck>
-; CHECK-EMPTY:
-; CHECK-NEXT: ir-bb<vector.scevcheck>:
-; CHECK-NEXT: IR %3 = add nsw i64 %0, -1
-; CHECK-NEXT: IR %4 = add i32 %n, -1
-; CHECK-NEXT: IR %5 = trunc i64 %3 to i32
-; CHECK-NEXT: IR %mul = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 1, i32 %5)
-; CHECK-NEXT: IR %mul.result = extractvalue { i32, i1 } %mul, 0
-; CHECK-NEXT: IR %mul.overflow = extractvalue { i32, i1 } %mul, 1
-; CHECK-NEXT: IR %6 = sub i32 %4, %mul.result
-; CHECK-NEXT: IR %7 = icmp ugt i32 %6, %4
-; CHECK-NEXT: IR %8 = or i1 %7, %mul.overflow
-; CHECK-NEXT: IR %9 = icmp ugt i64 %3, 4294967295
-; CHECK-NEXT: IR %10 = or i1 %8, %9
-; CHECK-NEXT: EMIT branch-on-cond ir<%10>
-; CHECK-NEXT: Successor(s): ir-bb<scalar.ph>, ir-bb<vector.memcheck>
-; CHECK-EMPTY:
-; CHECK-NEXT: ir-bb<vector.memcheck>:
-; CHECK-NEXT: IR %11 = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: IR %12 = mul nuw i64 %11, 4
-; CHECK-NEXT: IR %13 = mul i64 %12, 4
-; CHECK-NEXT: IR %14 = sub i64 %B1, %A2
-; CHECK-NEXT: IR %diff.check = icmp ult i64 %14, %13
-; CHECK-NEXT: EMIT branch-on-cond ir<%diff.check>
-; CHECK-NEXT: Successor(s): ir-bb<scalar.ph>, ir-bb<vector.ph>
-; CHECK-EMPTY:
-; CHECK-NEXT: ir-bb<vector.ph>:
-; CHECK-NEXT: IR %15 = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: IR %16 = mul nuw i64 %15, 4
-; CHECK-NEXT: IR %n.mod.vf = urem i64 %0, %16
-; CHECK-NEXT: IR %n.vec = sub i64 %0, %n.mod.vf
-; CHECK-NEXT: IR %17 = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: IR %18 = mul nuw i64 %17, 4
-; CHECK-NEXT: vp<%3> = DERIVED-IV ir<%0> + ir<%n.vec> * ir<-1>
-; CHECK-NEXT: vp<%4> = DERIVED-IV ir<%n> + ir<%n.vec> * ir<-1>
-; CHECK-NEXT: Successor(s): vector.body
-; CHECK-EMPTY:
-; CHECK-NEXT: vector.body:
-; CHECK-NEXT: EMIT-SCALAR vp<%index> = phi [ ir<0>, ir-bb<vector.ph> ], [ vp<%index.next>, vector.body ]
-; CHECK-NEXT: vp<%5> = DERIVED-IV ir<%n> + vp<%index> * ir<-1>
-; CHECK-NEXT: CLONE ir<%i.0> = add nsw vp<%5>, ir<-1>
-; CHECK-NEXT: CLONE ir<%idxprom> = zext ir<%i.0>
-; CHECK-NEXT: CLONE ir<%arrayidx> = getelementptr inbounds ir<%B>, ir<%idxprom>
-; CHECK-NEXT: vp<%6> = vector-end-pointer inbounds ir<%arrayidx>, ir<%18>
-; CHECK-NEXT: WIDEN ir<%19> = load vp<%6>
-; CHECK-NEXT: WIDEN ir<%add9> = add ir<%19>, ir<1>
-; CHECK-NEXT: CLONE ir<%arrayidx3> = getelementptr inbounds ir<%A>, ir<%idxprom>
-; CHECK-NEXT: vp<%7> = vector-end-pointer inbounds ir<%arrayidx3>, ir<%18>
-; CHECK-NEXT: WIDEN store vp<%7>, ir<%add9>
-; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<%index>, ir<%18>.1
-; CHECK-NEXT: EMIT branch-on-count vp<%index.next>, ir<%n.vec>
-; CHECK-NEXT: Successor(s): middle.block, vector.body
-; CHECK-EMPTY:
-; CHECK-NEXT: middle.block:
-; CHECK-NEXT: EMIT vp<%cmp.n> = icmp eq ir<%0>, ir<%n.vec>
-; CHECK-NEXT: EMIT branch-on-cond vp<%cmp.n>
-; CHECK-NEXT: Successor(s): ir-bb<for.cond.cleanup.loopexit>, ir-bb<scalar.ph>
-; CHECK-EMPTY:
-; CHECK-NEXT: ir-bb<for.cond.cleanup.loopexit>:
-; CHECK-NEXT: No successors
-; CHECK-EMPTY:
-; CHECK-NEXT: ir-bb<scalar.ph>:
-; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val> = phi [ vp<%3>, middle.block ], [ ir<%0>, ir-bb<for.body.preheader> ], [ ir<%0>, ir-bb<vector.scevcheck> ], [ ir<%0>, ir-bb<vector.memcheck> ]
-; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val>.1 = phi [ vp<%4>, middle.block ], [ ir<%n>, ir-bb<for.body.preheader> ], [ ir<%n>, ir-bb<vector.scevcheck> ], [ ir<%n>, ir-bb<vector.memcheck> ]
-; CHECK-NEXT: Successor(s): ir-bb<for.body>
-; CHECK-EMPTY:
-; CHECK-NEXT: ir-bb<for.body>:
-; CHECK-NEXT: IR %indvars.iv = phi i64 [ %0, %scalar.ph ], [ %indvars.iv.next, %for.body ] (extra operand: vp<%bc.resume.val> from ir-bb<scalar.ph>)
-; CHECK-NEXT: IR %i.0.in8 = phi i32 [ %n, %scalar.ph ], [ %i.0, %for.body ] (extra operand: vp<%bc.resume.val>.1 from ir-bb<scalar.ph>)
-; CHECK-NEXT: IR %i.0 = add nsw i32 %i.0.in8, -1
-; CHECK-NEXT: IR %idxprom = zext i32 %i.0 to i64
-; CHECK-NEXT: IR %arrayidx = getelementptr inbounds i32, ptr %B, i64 %idxprom
-; CHECK-NEXT: IR %19 = load i32, ptr %arrayidx, align 4
-; CHECK-NEXT: IR %add9 = add i32 %19, 1
-; CHECK-NEXT: IR %arrayidx3 = getelementptr inbounds i32, ptr %A, i64 %idxprom
-; CHECK-NEXT: IR store i32 %add9, ptr %arrayidx3, align 4
-; CHECK-NEXT: IR %cmp = icmp ugt i64 %indvars.iv, 1
-; CHECK-NEXT: IR %indvars.iv.next = add nsw i64 %indvars.iv, -1
-; CHECK-NEXT: No successors
-; CHECK-NEXT: }
-; CHECK-NEXT: LV: vectorizing VPBB: ir-bb<for.body.preheader> in BB: for.body.preheader
-; CHECK-NEXT: LV: filled BB:
-; CHECK-NEXT: for.body.preheader: ; preds = %entry
-; CHECK-NEXT: %0 = zext i32 %n to i64
-; CHECK-NEXT: %1 = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: %2 = mul nuw i64 %1, 4
-; CHECK-NEXT: %min.iters.check = icmp ult i64 %0, %2
-; CHECK-NEXT: br i1 %min.iters.check, label %scalar.ph, label %vector.ph
-; CHECK-NEXT: LV: vectorizing VPBB: ir-bb<vector.scevcheck> in BB: vector.scevcheck
-; CHECK-NEXT: LV: filled BB:
-; CHECK-NEXT: vector.scevcheck: ; No predecessors!
-; CHECK-NEXT: %3 = add nsw i64 %0, -1
-; CHECK-NEXT: %4 = add i32 %n, -1
-; CHECK-NEXT: %5 = trunc i64 %3 to i32
-; CHECK-NEXT: %mul = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 1, i32 %5)
-; CHECK-NEXT: %mul.result = extractvalue { i32, i1 } %mul, 0
-; CHECK-NEXT: %mul.overflow = extractvalue { i32, i1 } %mul, 1
-; CHECK-NEXT: %6 = sub i32 %4, %mul.result
-; CHECK-NEXT: %7 = icmp ugt i32 %6, %4
-; CHECK-NEXT: %8 = or i1 %7, %mul.overflow
-; CHECK-NEXT: %9 = icmp ugt i64 %3, 4294967295
-; CHECK-NEXT: %10 = or i1 %8, %9
-; CHECK-NEXT: br i1 %10, <null operand!>, <null operand!>
-; CHECK-NEXT: LV: draw edge from for.body.preheader
-; CHECK-NEXT: LV: vectorizing VPBB: ir-bb<vector.memcheck> in BB: vector.memcheck
-; CHECK-NEXT: LV: filled BB:
-; CHECK-NEXT: vector.memcheck: ; No predecessors!
-; CHECK-NEXT: %11 = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: %12 = mul nuw i64 %11, 4
-; CHECK-NEXT: %13 = mul i64 %12, 4
-; CHECK-NEXT: %14 = sub i64 %B1, %A2
-; CHECK-NEXT: %diff.check = icmp ult i64 %14, %13
-; CHECK-NEXT: br i1 %diff.check, <null operand!>, <null operand!>
-; CHECK-NEXT: LV: draw edge from vector.scevcheck
-; CHECK-NEXT: LV: vectorizing VPBB: ir-bb<vector.ph> in BB: vector.ph
-; CHECK-NEXT: LV: filled BB:
-; CHECK-NEXT: vector.ph: ; No predecessors!
-; CHECK-NEXT: %15 = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: %16 = mul nuw i64 %15, 4
-; CHECK-NEXT: %n.mod.vf = urem i64 %0, %16
-; CHECK-NEXT: %n.vec = sub i64 %0, %n.mod.vf
-; CHECK-NEXT: %17 = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: %18 = mul nuw i64 %17, 4
-; CHECK-NEXT: %19 = sub i64 %0, %n.vec
-; CHECK-NEXT: %.cast = trunc i64 %n.vec to i32
-; CHECK-NEXT: %20 = sub i32 %n, %.cast
-; CHECK-NEXT: br
-; CHECK-NEXT: LV: draw edge from vector.memcheck
-; CHECK-NEXT: LV: created vector.body
-; CHECK-NEXT: LV: draw edge from vector.ph
-; CHECK-NEXT: LV: vectorizing VPBB: vector.body in BB: vector.body
-; CHECK-NEXT: LV: filled BB:
-; CHECK-NEXT: vector.body: ; preds = %vector.body, %vector.ph
-; CHECK-NEXT: %index = phi i64 [ 0, %vector.ph ]
-; CHECK-NEXT: %.cast3 = trunc i64 %index to i32
-; CHECK-NEXT: %offset.idx = sub i32 %n, %.cast3
-; CHECK-NEXT: %21 = add nsw i32 %offset.idx, -1
-; CHECK-NEXT: %22 = zext i32 %21 to i64
-; CHECK-NEXT: %23 = getelementptr inbounds i32, ptr %B, i64 %22
-; CHECK-NEXT: %24 = mul i64 0, %18
-; CHECK-NEXT: %25 = sub i64 %18, 1
-; CHECK-NEXT: %26 = mul i64 -1, %25
-; CHECK-NEXT: %27 = getelementptr inbounds i32, ptr %23, i64 %24
-; CHECK-NEXT: %28 = getelementptr inbounds i32, ptr %27, i64 %26
-; CHECK-NEXT: %wide.load = load <vscale x 4 x i32>, ptr %28, align 4
-; CHECK-NEXT: %reverse = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> %wide.load)
-; CHECK-NEXT: %29 = add <vscale x 4 x i32> %reverse, splat (i32 1)
-; CHECK-NEXT: %30 = getelementptr inbounds i32, ptr %A, i64 %22
-; CHECK-NEXT: %31 = mul i64 0, %18
-; CHECK-NEXT: %32 = sub i64 %18, 1
-; CHECK-NEXT: %33 = mul i64 -1, %32
-; CHECK-NEXT: %34 = getelementptr inbounds i32, ptr %30, i64 %31
-; CHECK-NEXT: %35 = getelementptr inbounds i32, ptr %34, i64 %33
-; CHECK-NEXT: %reverse4 = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> %29)
-; CHECK-NEXT: store <vscale x 4 x i32> %reverse4, ptr %35, align 4
-; CHECK-NEXT: %index.next = add nuw i64 %index, %18
-; CHECK-NEXT: %36 = icmp eq i64 %index.next, %n.vec
-; CHECK-NEXT: br i1 %36, <null operand!>, label %vector.body
-; CHECK-NEXT: LV: created middle.block
-; CHECK-NEXT: LV: draw edge from vector.body
-; CHECK-NEXT: LV: vectorizing VPBB: middle.block in BB: middle.block
-; CHECK-NEXT: LV: filled BB:
-; CHECK-NEXT: middle.block: ; preds = %vector.body
-; CHECK-NEXT: %cmp.n = icmp eq i64 %0, %n.vec
-; CHECK-NEXT: br i1 %cmp.n, <null operand!>, <null operand!>
-; CHECK-NEXT: LV: vectorizing VPBB: ir-bb<for.cond.cleanup.loopexit> in BB: for.cond.cleanup.loopexit
-; CHECK-NEXT: LV: filled BB:
-; CHECK-NEXT: for.cond.cleanup.loopexit: ; preds = %for.body
-; CHECK-NEXT: br label %for.cond.cleanup
-; CHECK-NEXT: LV: draw edge from middle.block
-; CHECK-NEXT: LV: vectorizing VPBB: ir-bb<scalar.ph> in BB: scalar.ph
-; CHECK-NEXT: LV: filled BB:
-; CHECK-NEXT: scalar.ph: ; preds = %for.body.preheader
-; CHECK-NEXT: %bc.resume.val = phi i64 [ %19, %middle.block ], [ %0, %for.body.preheader ], [ %0, %vector.scevcheck ], [ %0, %vector.memcheck ]
-; CHECK-NEXT: %bc.resume.val5 = phi i32 [ %20, %middle.block ], [ %n, %for.body.preheader ], [ %n, %vector.scevcheck ], [ %n, %vector.memcheck ]
-; CHECK-NEXT: br label %for.body
-; CHECK-NEXT: LV: draw edge from middle.block
-; CHECK-NEXT: LV: draw edge from for.body.preheader
-; CHECK-NEXT: LV: draw edge from vector.scevcheck
-; CHECK-NEXT: LV: draw edge from vector.memcheck
-; CHECK-NEXT: LV: vectorizing VPBB: ir-bb<for.body> in BB: for.body
-; CHECK-NEXT: LV: filled BB:
-; CHECK-NEXT: for.body: ; preds = %for.body, %scalar.ph
-; CHECK-NEXT: %indvars.iv = phi i64 [ %bc.resume.val, %scalar.ph ], [ %indvars.iv.next, %for.body ]
-; CHECK-NEXT: %i.0.in8 = phi i32 [ %bc.resume.val5, %scalar.ph ], [ %i.0, %for.body ]
-; CHECK-NEXT: %i.0 = add nsw i32 %i.0.in8, -1
-; CHECK-NEXT: %idxprom = zext i32 %i.0 to i64
-; CHECK-NEXT: %arrayidx = getelementptr inbounds i32, ptr %B, i64 %idxprom
-; CHECK-NEXT: %37 = load i32, ptr %arrayidx, align 4
-; CHECK-NEXT: %add9 = add i32 %37, 1
-; CHECK-NEXT: %arrayidx3 = getelementptr inbounds i32, ptr %A, i64 %idxprom
-; CHECK-NEXT: store i32 %add9, ptr %arrayidx3, align 4
-; CHECK-NEXT: %cmp = icmp ugt i64 %indvars.iv, 1
-; CHECK-NEXT: %indvars.iv.next = add nsw i64 %indvars.iv, -1
-; CHECK-NEXT: br i1 %cmp, label %for.body, label %for.cond.cleanup.loopexit, !llvm.loop !0
-; CHECK-NEXT: LV: draw edge from scalar.ph
-; CHECK-NEXT: LV: Interleaving disabled by the pass manager
-; CHECK-NEXT: LV: Vectorizing: innermost loop.
-; CHECK-EMPTY:
+; RV64-LABEL: define void @vector_reverse_i64(
+; RV64-SAME: ptr noundef writeonly captures(none) [[A:%.*]], ptr noundef readonly captures(none) [[B:%.*]], i32 noundef signext [[N:%.*]]) #[[ATTR0]] {
+; RV64-NEXT: [[ENTRY:.*:]]
+; RV64-NEXT: [[A2:%.*]] = ptrtoint ptr [[A]] to i64
+; RV64-NEXT: [[B1:%.*]] = ptrtoint ptr [[B]] to i64
+; RV64-NEXT: [[CMP7:%.*]] = icmp sgt i32 [[N]], 0
+; RV64-NEXT: br i1 [[CMP7]], label %[[FOR_BODY_PREHEADER:.*]], label %[[FOR_COND_CLEANUP:.*]]
+; RV64: [[FOR_BODY_PREHEADER]]:
+; RV64-NEXT: [[TMP0:%.*]] = zext i32 [[N]] to i64
+; RV64-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
+; RV64-NEXT: [[TMP2:%.*]] = mul nuw i64 [[TMP1]], 4
+; RV64-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP0]], [[TMP2]]
+; RV64-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_SCEVCHECK:.*]]
+; RV64: [[VECTOR_SCEVCHECK]]:
+; RV64-NEXT: [[TMP3:%.*]] = add nsw i64 [[TMP0]], -1
+; RV64-NEXT: [[TMP4:%.*]] = add i32 [[N]], -1
+; RV64-NEXT: [[TMP5:%.*]] = trunc i64 [[TMP3]] to i32
+; RV64-NEXT: [[MUL:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 1, i32 [[TMP5]])
+; RV64-NEXT: [[MUL_RESULT:%.*]] = extractvalue { i32, i1 } [[MUL]], 0
+; RV64-NEXT: [[MUL_OVERFLOW:%.*]] = extractvalue { i32, i1 } [[MUL]], 1
+; RV64-NEXT: [[TMP6:%.*]] = sub i32 [[TMP4]], [[MUL_RESULT]]
+; RV64-NEXT: [[TMP7:%.*]] = icmp ugt i32 [[TMP6]], [[TMP4]]
+; RV64-NEXT: [[TMP8:%.*]] = or i1 [[TMP7]], [[MUL_OVERFLOW]]
+; RV64-NEXT: [[TMP9:%.*]] = icmp ugt i64 [[TMP3]], 4294967295
+; RV64-NEXT: [[TMP10:%.*]] = or i1 [[TMP8]], [[TMP9]]
+; RV64-NEXT: br i1 [[TMP10]], label %[[SCALAR_PH]], label %[[VECTOR_MEMCHECK:.*]]
+; RV64: [[VECTOR_MEMCHECK]]:
+; RV64-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64()
+; RV64-NEXT: [[TMP12:%.*]] = mul nuw i64 [[TMP11]], 4
+; RV64-NEXT: [[TMP13:%.*]] = mul i64 [[TMP12]], 4
+; RV64-NEXT: [[TMP14:%.*]] = sub i64 [[B1]], [[A2]]
+; RV64-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP14]], [[TMP13]]
+; RV64-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
+; RV64: [[VECTOR_PH]]:
+; RV64-NEXT: [[TMP15:%.*]] = call i64 @llvm.vscale.i64()
+; RV64-NEXT: [[TMP16:%.*]] = mul nuw i64 [[TMP15]], 4
+; RV64-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP0]], [[TMP16]]
+; RV64-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP0]], [[N_MOD_VF]]
+; RV64-NEXT: [[TMP17:%.*]] = call i64 @llvm.vscale.i64()
+; RV64-NEXT: [[TMP18:%.*]] = mul nuw i64 [[TMP17]], 4
+; RV64-NEXT: [[TMP19:%.*]] = sub i64 [[TMP0]], [[N_VEC]]
+; RV64-NEXT: [[DOTCAST:%.*]] = trunc i64 [[N_VEC]] to i32
+; RV64-NEXT: [[TMP20:%.*]] = sub i32 [[N]], [[DOTCAST]]
+; RV64-NEXT: br label %[[VECTOR_BODY:.*]]
+; RV64: [[VECTOR_BODY]]:
+; RV64-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; RV64-NEXT: [[DOTCAST3:%.*]] = trunc i64 [[INDEX]] to i32
+; RV64-NEXT: [[OFFSET_IDX:%.*]] = sub i32 [[N]], [[DOTCAST3]]
+; RV64-NEXT: [[TMP21:%.*]] = add nsw i32 [[OFFSET_IDX]], -1
+; RV64-NEXT: [[TMP22:%.*]] = zext i32 [[TMP21]] to i64
+; RV64-NEXT: [[TMP23:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[TMP22]]
+; RV64-NEXT: [[TMP24:%.*]] = mul i64 0, [[TMP18]]
+; RV64-NEXT: [[TMP25:%.*]] = sub i64 [[TMP18]], 1
+; RV64-NEXT: [[TMP26:%.*]] = mul i64 -1, [[TMP25]]
+; RV64-NEXT: [[TMP27:%.*]] = getelementptr inbounds i32, ptr [[TMP23]], i64 [[TMP24]]
+; RV64-NEXT: [[TMP28:%.*]] = getelementptr inbounds i32, ptr [[TMP27]], i64 [[TMP26]]
+; RV64-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP28]], align 4
+; RV64-NEXT: [[REVERSE:%.*]] = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[WIDE_LOAD]])
+; RV64-NEXT: [[TMP29:%.*]] = add <vscale x 4 x i32> [[REVERSE]], splat (i32 1)
+; RV64-NEXT: [[TMP30:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP22]]
+; RV64-NEXT: [[TMP31:%.*]] = mul i64 0, [[TMP18]]
+; RV64-NEXT: [[TMP32:%.*]] = sub i64 [[TMP18]], 1
+; RV64-NEXT: [[TMP33:%.*]] = mul i64 -1, [[TMP32]]
+; RV64-NEXT: [[TMP34:%.*]] = getelementptr inbounds i32, ptr [[TMP30]], i64 [[TMP31]]
+; RV64-NEXT: [[TMP35:%.*]] = getelementptr inbounds i32, ptr [[TMP34]], i64 [[TMP33]]
+; RV64-NEXT: [[REVERSE4:%.*]] = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[TMP29]])
+; RV64-NEXT: store <vscale x 4 x i32> [[REVERSE4]], ptr [[TMP35]], align 4
+; RV64-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP18]]
+; RV64-NEXT: [[TMP36:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; RV64-NEXT: br i1 [[TMP36]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; RV64: [[MIDDLE_BLOCK]]:
+; RV64-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP0]], [[N_VEC]]
+; RV64-NEXT: br i1 [[CMP_N]], label %[[FOR_COND_CLEANUP_LOOPEXIT:.*]], label %[[SCALAR_PH]]
+; RV64: [[SCALAR_PH]]:
+; RV64-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP19]], %[[MIDDLE_BLOCK]] ], [ [[TMP0]], %[[FOR_BODY_PREHEADER]] ], [ [[TMP0]], %[[VECTOR_SCEVCHECK]] ], [ [[TMP0]], %[[VECTOR_MEMCHECK]] ]
+; RV64-NEXT: [[BC_RESUME_VAL5:%.*]] = phi i32 [ [[TMP20]], %[[MIDDLE_BLOCK]] ], [ [[N]], %[[FOR_BODY_PREHEADER]] ], [ [[N]], %[[VECTOR_SCEVCHECK]] ], [ [[N]], %[[VECTOR_MEMCHECK]] ]
+; RV64-NEXT: br label %[[FOR_BODY:.*]]
+; RV64: [[FOR_COND_CLEANUP_LOOPEXIT]]:
+; RV64-NEXT: br label %[[FOR_COND_CLEANUP]]
+; RV64: [[FOR_COND_CLEANUP]]:
+; RV64-NEXT: ret void
+; RV64: [[FOR_BODY]]:
+;
+; RV32-LABEL: define void @vector_reverse_i64(
+; RV32-SAME: ptr noundef writeonly captures(none) [[A:%.*]], ptr noundef readonly captures(none) [[B:%.*]], i32 noundef signext [[N:%.*]]) #[[ATTR0]] {
+; RV32-NEXT: [[ENTRY:.*:]]
+; RV32-NEXT: [[A2:%.*]] = ptrtoint ptr [[A]] to i32
+; RV32-NEXT: [[B1:%.*]] = ptrtoint ptr [[B]] to i32
+; RV32-NEXT: [[CMP7:%.*]] = icmp sgt i32 [[N]], 0
+; RV32-NEXT: br i1 [[CMP7]], label %[[FOR_BODY_PREHEADER:.*]], label %[[FOR_COND_CLEANUP:.*]]
+; RV32: [[FOR_BODY_PREHEADER]]:
+; RV32-NEXT: [[TMP0:%.*]] = zext i32 [[N]] to i64
+; RV32-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
+; RV32-NEXT: [[TMP2:%.*]] = mul nuw i64 [[TMP1]], 4
+; RV32-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP0]], [[TMP2]]
+; RV32-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
+; RV32: [[VECTOR_MEMCHECK]]:
+; RV32-NEXT: [[TMP3:%.*]] = call i32 @llvm.vscale.i32()
+; RV32-NEXT: [[TMP4:%.*]] = mul nuw i32 [[TMP3]], 4
+; RV32-NEXT: [[TMP5:%.*]] = mul i32 [[TMP4]], 4
+; RV32-NEXT: [[TMP6:%.*]] = sub i32 [[B1]], [[A2]]
+; RV32-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i32 [[TMP6]], [[TMP5]]
+; RV32-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
+; RV32: [[VECTOR_PH]]:
+; RV32-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
+; RV32-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
+; RV32-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP0]], [[TMP8]]
+; RV32-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP0]], [[N_MOD_VF]]
+; RV32-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
+; RV32-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4
+; RV32-NEXT: [[TMP11:%.*]] = sub i64 [[TMP0]], [[N_VEC]]
+; RV32-NEXT: [[DOTCAST:%.*]] = trunc i64 [[N_VEC]] to i32
+; RV32-NEXT: [[TMP12:%.*]] = sub i32 [[N]], [[DOTCAST]]
+; RV32-NEXT: br label %[[VECTOR_BODY:.*]]
+; RV32: [[VECTOR_BODY]]:
+; RV32-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; RV32-NEXT: [[DOTCAST3:%.*]] = trunc i64 [[INDEX]] to i32
+; RV32-NEXT: [[OFFSET_IDX:%.*]] = sub i32 [[N]], [[DOTCAST3]]
+; RV32-NEXT: [[TMP13:%.*]] = add nsw i32 [[OFFSET_IDX]], -1
+; RV32-NEXT: [[TMP14:%.*]] = zext i32 [[TMP13]] to i64
+; RV32-NEXT: [[TMP15:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[TMP14]]
+; RV32-NEXT: [[TMP16:%.*]] = trunc i64 [[TMP10]] to i32
+; RV32-NEXT: [[TMP17:%.*]] = mul i32 0, [[TMP16]]
+; RV32-NEXT: [[TMP18:%.*]] = sub i32 [[TMP16]], 1
+; RV32-NEXT: [[TMP19:%.*]] = mul i32 -1, [[TMP18]]
+; RV32-NEXT: [[TMP20:%.*]] = getelementptr inbounds i32, ptr [[TMP15]], i32 [[TMP17]]
+; RV32-NEXT: [[TMP21:%.*]] = getelementptr inbounds i32, ptr [[TMP20]], i32 [[TMP19]]
+; RV32-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP21]], align 4
+; RV32-NEXT: [[REVERSE:%.*]] = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[WIDE_LOAD]])
+; RV32-NEXT: [[TMP22:%.*]] = add <vscale x 4 x i32> [[REVERSE]], splat (i32 1)
+; RV32-NEXT: [[TMP23:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP14]]
+; RV32-NEXT: [[TMP24:%.*]] = trunc i64 [[TMP10]] to i32
+; RV32-NEXT: [[TMP25:%.*]] = mul i32 0, [[TMP24]]
+; RV32-NEXT: [[TMP26:%.*]] = sub i32 [[TMP24]], 1
+; RV32-NEXT: [[TMP27:%.*]] = mul i32 -1, [[TMP26]]
+; RV32-NEXT: [[TMP28:%.*]] = getelementptr inbounds i32, ptr [[TMP23]], i32 [[TMP25]]
+; RV32-NEXT: [[TMP29:%.*]] = getelementptr inbounds i32, ptr [[TMP28]], i32 [[TMP27]]
+; RV32-NEXT: [[REVERSE4:%.*]] = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[TMP22]])
+; RV32-NEXT: store <vscale x 4 x i32> [[REVERSE4]], ptr [[TMP29]], align 4
+; RV32-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP10]]
+; RV32-NEXT: [[TMP30:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; RV32-NEXT: br i1 [[TMP30]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; RV32: [[MIDDLE_BLOCK]]:
+; RV32-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP0]], [[N_VEC]]
+; RV32-NEXT: br i1 [[CMP_N]], label %[[FOR_COND_CLEANUP_LOOPEXIT:.*]], label %[[SCALAR_PH]]
+; RV32: [[SCALAR_PH]]:
+; RV32-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP11]], %[[MIDDLE_BLOCK]] ], [ [[TMP0]], %[[FOR_BODY_PREHEADER]] ], [ [[TMP0]], %[[VECTOR_MEMCHECK]] ]
+; RV32-NEXT: [[BC_RESUME_VAL5:%.*]] = phi i32 [ [[TMP12]], %[[MIDDLE_BLOCK]] ], [ [[N]], %[[FOR_BODY_PREHEADER]] ], [ [[N]], %[[VECTOR_MEMCHECK]] ]
+; RV32-NEXT: br label %[[FOR_BODY:.*]]
+; RV32: [[FOR_COND_CLEANUP_LOOPEXIT]]:
+; RV32-NEXT: br label %[[FOR_COND_CLEANUP]]
+; RV32: [[FOR_COND_CLEANUP]]:
+; RV32-NEXT: ret void
+; RV32: [[FOR_BODY]]:
+;
+; RV64-UF2-LABEL: define void @vector_reverse_i64(
+; RV64-UF2-SAME: ptr noundef writeonly captures(none) [[A:%.*]], ptr noundef readonly captures(none) [[B:%.*]], i32 noundef signext [[N:%.*]]) #[[ATTR0]] {
+; RV64-UF2-NEXT: [[ENTRY:.*:]]
+; RV64-UF2-NEXT: [[A2:%.*]] = ptrtoint ptr [[A]] to i64
+; RV64-UF2-NEXT: [[B1:%.*]] = ptrtoint ptr [[B]] to i64
+; RV64-UF2-NEXT: [[CMP7:%.*]] = icmp sgt i32 [[N]], 0
+; RV64-UF2-NEXT: br i1 [[CMP7]], label %[[FOR_BODY_PREHEADER:.*]], label %[[FOR_COND_CLEANUP:.*]]
+; RV64-UF2: [[FOR_BODY_PREHEADER]]:
+; RV64-UF2-NEXT: [[TMP0:%.*]] = zext i32 [[N]] to i64
+; RV64-UF2-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
+; RV64-UF2-NEXT: [[TMP2:%.*]] = mul nuw i64 [[TMP1]], 8
+; RV64-UF2-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP0]], [[TMP2]]
+; RV64-UF2-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_SCEVCHECK:.*]]
+; RV64-UF2: [[VECTOR_SCEVCHECK]]:
+; RV64-UF2-NEXT: [[TMP3:%.*]] = add nsw i64 [[TMP0]], -1
+; RV64-UF2-NEXT: [[TMP4:%.*]] = add i32 [[N]], -1
+; RV64-UF2-NEXT: [[TMP5:%.*]] = trunc i64 [[TMP3]] to i32
+; RV64-UF2-NEXT: [[MUL:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 1, i32 [[TMP5]])
+; RV64-UF2-NEXT: [[MUL_RESULT:%.*]] = extractvalue { i32, i1 } [[MUL]], 0
+; RV64-UF2-NEXT: [[MUL_OVERFLOW:%.*]] = extractvalue { i32, i1 } [[MUL]], 1
+; RV64-UF2-NEXT: [[TMP6:%.*]] = sub i32 [[TMP4]], [[MUL_RESULT]]
+; RV64-UF2-NEXT: [[TMP7:%.*]] = icmp ugt i32 [[TMP6]], [[TMP4]]
+; RV64-UF2-NEXT: [[TMP8:%.*]] = or i1 [[TMP7]], [[MUL_OVERFLOW]]
+; RV64-UF2-NEXT: [[TMP9:%.*]] = icmp ugt i64 [[TMP3]], 4294967295
+; RV64-UF2-NEXT: [[TMP10:%.*]] = or i1 [[TMP8]], [[TMP9]]
+; RV64-UF2-NEXT: br i1 [[TMP10]], label %[[SCALAR_PH]], label %[[VECTOR_MEMCHECK:.*]]
+; RV64-UF2: [[VECTOR_MEMCHECK]]:
+; RV64-UF2-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64()
+; RV64-UF2-NEXT: [[TMP12:%.*]] = mul nuw i64 [[TMP11]], 4
+; RV64-UF2-NEXT: [[TMP13:%.*]] = mul i64 [[TMP12]], 8
+; RV64-UF2-NEXT: [[TMP14:%.*]] = sub i64 [[B1]], [[A2]]
+; RV64-UF2-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP14]], [[TMP13]]
+; RV64-UF2-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
+; RV64-UF2: [[VECTOR_PH]]:
+; RV64-UF2-NEXT: [[TMP15:%.*]] = call i64 @llvm.vscale.i64()
+; RV64-UF2-NEXT: [[TMP16:%.*]] = mul nuw i64 [[TMP15]], 8
+; RV64-UF2-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP0]], [[TMP16]]
+; RV64-UF2-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP0]], [[N_MOD_VF]]
+; RV64-UF2-NEXT: [[TMP17:%.*]] = call i64 @llvm.vscale.i64()
+; RV64-UF2-NEXT: [[TMP18:%.*]] = mul nuw i64 [[TMP17]], 4
+; RV64-UF2-NEXT: [[TMP19:%.*]] = mul i64 [[TMP18]], 2
+; RV64-UF2-NEXT: [[TMP20:%.*]] = sub i64 [[TMP0]], [[N_VEC]]
+; RV64-UF2-NEXT: [[DOTCAST:%.*]] = trunc i64 [[N_VEC]] to i32
+; RV64-UF2-NEXT: [[TMP21:%.*]] = sub i32 [[N]], [[DOTCAST]]
+; RV64-UF2-NEXT: br label %[[VECTOR_BODY:.*]]
+; RV64-UF2: [[VECTOR_BODY]]:
+; RV64-UF2-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; RV64-UF2-NEXT: [[DOTCAST3:%.*]] = trunc i64 [[INDEX]] to i32
+; RV64-UF2-NEXT: [[OFFSET_IDX:%.*]] = sub i32 [[N]], [[DOTCAST3]]
+; RV64-UF2-NEXT: [[TMP22:%.*]] = add nsw i32 [[OFFSET_IDX]], -1
+; RV64-UF2-NEXT: [[TMP23:%.*]] = zext i32 [[TMP22]] to i64
+; RV64-UF2-NEXT: [[TMP24:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[TMP23]]
+; RV64-UF2-NEXT: [[TMP25:%.*]] = mul i64 0, [[TMP18]]
+; RV64-UF2-NEXT: [[TMP26:%.*]] = sub i64 [[TMP18]], 1
+; RV64-UF2-NEXT: [[TMP27:%.*]] = mul i64 -1, [[TMP26]]
+; RV64-UF2-NEXT: [[TMP28:%.*]] = getelementptr inbounds i32, ptr [[TMP24]], i64 [[TMP25]]
+; RV64-UF2-NEXT: [[TMP29:%.*]] = getelementptr inbounds i32, ptr [[TMP28]], i64 [[TMP27]]
+; RV64-UF2-NEXT: [[TMP30:%.*]] = mul i64 -1, [[TMP18]]
+; RV64-UF2-NEXT: [[TMP31:%.*]] = sub i64 [[TMP18]], 1
+; RV64-UF2-NEXT: [[TMP32:%.*]] = mul i64 -1, [[TMP31]]
+; RV64-UF2-NEXT: [[TMP33:%.*]] = getelementptr inbounds i32, ptr [[TMP24]], i64 [[TMP30]]
+; RV64-UF2-NEXT: [[TMP34:%.*]] = getelementptr inbounds i32, ptr [[TMP33]], i64 [[TMP32]]
+; RV64-UF2-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP29]], align 4
+; RV64-UF2-NEXT: [[REVERSE:%.*]] = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[WIDE_LOAD]])
+; RV64-UF2-NEXT: [[WIDE_LOAD4:%.*]] = load <vscale x 4 x i32>, ptr [[TMP34]], align 4
+; RV64-UF2-NEXT: [[REVERSE5:%.*]] = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[WIDE_LOAD4]])
+; RV64-UF2-NEXT: [[TMP35:%.*]] = add <vscale x 4 x i32> [[REVERSE]], splat (i32 1)
+; RV64-UF2-NEXT: [[TMP36:%.*]] = add <vscale x 4 x i32> [[REVERSE5]], splat (i32 1)
+; RV64-UF2-NEXT: [[TMP37:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP23]]
+; RV64-UF2-NEXT: [[TMP38:%.*]] = mul i64 0, [[TMP18]]
+; RV64-UF2-NEXT: [[TMP39:%.*]] = sub i64 [[TMP18]], 1
+; RV64-UF2-NEXT: [[TMP40:%.*]] = mul i64 -1, [[TMP39]]
+; RV64-UF2-NEXT: [[TMP41:%.*]] = getelementptr inbounds i32, ptr [[TMP37]], i64 [[TMP38]]
+; RV64-UF2-NEXT: [[TMP42:%.*]] = getelementptr inbounds i32, ptr [[TMP41]], i64 [[TMP40]]
+; RV64-UF2-NEXT: [[TMP43:%.*]] = mul i64 -1, [[TMP18]]
+; RV64-UF2-NEXT: [[TMP44:%.*]] = sub i64 [[TMP18]], 1
+; RV64-UF2-NEXT: [[TMP45:%.*]] = mul i64 -1, [[TMP44]]
+; RV64-UF2-NEXT: [[TMP46:%.*]] = getelementptr inbounds i32, ptr [[TMP37]], i64 [[TMP43]]
+; RV64-UF2-NEXT: [[TMP47:%.*]] = getelementptr inbounds i32, ptr [[TMP46]], i64 [[TMP45]]
+; RV64-UF2-NEXT: [[REVERSE6:%.*]] = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[TMP35]])
+; RV64-UF2-NEXT: store <vscale x 4 x i32> [[REVERSE6]], ptr [[TMP42]], align 4
+; RV64-UF2-NEXT: [[REVERSE7:%.*]] = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[TMP36]])
+; RV64-UF2-NEXT: store <vscale x 4 x i32> [[REVERSE7]], ptr [[TMP47]], align 4
+; RV64-UF2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP19]]
+; RV64-UF2-NEXT: [[TMP48:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; RV64-UF2-NEXT: br i1 [[TMP48]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; RV64-UF2: [[MIDDLE_BLOCK]]:
+; RV64-UF2-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP0]], [[N_VEC]]
+; RV64-UF2-NEXT: br i1 [[CMP_N]], label %[[FOR_COND_CLEANUP_LOOPEXIT:.*]], label %[[SCALAR_PH]]
+; RV64-UF2: [[SCALAR_PH]]:
+; RV64-UF2-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP20]], %[[MIDDLE_BLOCK]] ], [ [[TMP0]], %[[FOR_BODY_PREHEADER]] ], [ [[TMP0]], %[[VECTOR_SCEVCHECK]] ], [ [[TMP0]], %[[VECTOR_MEMCHECK]] ]
+; RV64-UF2-NEXT: [[BC_RESUME_VAL8:%.*]] = phi i32 [ [[TMP21]], %[[MIDDLE_BLOCK]] ], [ [[N]], %[[FOR_BODY_PREHEADER]] ], [ [[N]], %[[VECTOR_SCEVCHECK]] ], [ [[N]], %[[VECTOR_MEMCHECK]] ]
+; RV64-UF2-NEXT: br label %[[FOR_BODY:.*]]
+; RV64-UF2: [[FOR_COND_CLEANUP_LOOPEXIT]]:
+; RV64-UF2-NEXT: br label %[[FOR_COND_CLEANUP]]
+; RV64-UF2: [[FOR_COND_CLEANUP]]:
+; RV64-UF2-NEXT: ret void
+; RV64-UF2: [[FOR_BODY]]:
;
entry:
  %cmp7 = icmp sgt i32 %n, 0
@@ -423,390 +478,259 @@ for.body: ; preds = %for.body.preheader,
}

define void @vector_reverse_f32(ptr nocapture noundef writeonly %A, ptr nocapture noundef readonly %B, i32 noundef signext %n) {
-; CHECK-LABEL: 'vector_reverse_f32'
-; CHECK-NEXT: LV: Loop hints: force=enabled width=vscale x 4 interleave=0
-; CHECK-NEXT: LV: Found a loop: for.body
-; CHECK-NEXT: LV: Found an induction variable.
-; CHECK-NEXT: LV: Found an induction variable.
-; CHECK-NEXT: LV: Found FP op with unsafe algebra.
-; CHECK-NEXT: LV: Did not find one integer induction var.
-; CHECK-NEXT: LV: We can vectorize this loop (with a runtime bound check)!
-; CHECK-NEXT: LV: Loop does not require scalar epilogue
-; CHECK-NEXT: LV: Found trip count: 0
-; CHECK-NEXT: LV: Found maximum trip count: 4294967295
-; CHECK-NEXT: LV: Scalable vectorization is available
-; CHECK-NEXT: LV: The max safe fixed VF is: 67108864.
-; CHECK-NEXT: LV: The max safe scalable VF is: vscale x 4294967295.
-; CHECK-NEXT: LV: Found uniform instruction: %cmp = icmp ugt i64 %indvars.iv, 1
-; CHECK-NEXT: LV: Found uniform instruction: %arrayidx = getelementptr inbounds float, ptr %B, i64 %idxprom
-; CHECK-NEXT: LV: Found uniform instruction: %arrayidx3 = getelementptr inbounds float, ptr %A, i64 %idxprom
-; CHECK-NEXT: LV: Found uniform instruction: %idxprom = zext i32 %i.0 to i64
-; CHECK-NEXT: LV: Found uniform instruction: %idxprom = zext i32 %i.0 to i64
-; CHECK-NEXT: LV: Found uniform instruction: %indvars.iv = phi i64 [ %0, %for.body.preheader ], [ %indvars.iv.next, %for.body ]
-; CHECK-NEXT: LV: Found uniform instruction: %indvars.iv.next = add nsw i64 %indvars.iv, -1
-; CHECK-NEXT: LV: Found uniform instruction: %i.0.in8 = phi i32 [ %n, %for.body.preheader ], [ %i.0, %for.body ]
-; CHECK-NEXT: LV: Found uniform instruction: %i.0 = add nsw i32 %i.0.in8, -1
-; CHECK-NEXT: LV: Found an estimated cost of 0 for VF vscale x 4 For instruction: %indvars.iv = phi i64 [ %0, %for.body.preheader ], [ %indvars.iv.next, %for.body ]
-; CHECK-NEXT: LV: Found an estimated cost of 0 for VF vscale x 4 For instruction: %i.0.in8 = phi i32 [ %n, %for.body.preheader ], [ %i.0, %for.body ]
-; CHECK-NEXT: LV: Found an estimated cost of 1 for VF vscale x 4 For instruction: %i.0 = add nsw i32 %i.0.in8, -1
-; CHECK-NEXT: LV: Found an estimated cost of 1 for VF vscale x 4 For instruction: %idxprom = zext i32 %i.0 to i64
-; CHECK-NEXT: LV: Found an estimated cost of 0 for VF vscale x 4 For instruction: %arrayidx = getelementptr inbounds float, ptr %B, i64 %idxprom
-; CHECK-NEXT: LV: Found an estimated cost of 9 for VF vscale x 4 For instruction: %1 = load float, ptr %arrayidx, align 4
-; CHECK-NEXT: LV: Found an estimated cost of 4 for VF vscale x 4 For instruction: %conv1 = fadd float %1, 1.000000e+00
-; CHECK-NEXT: LV: Found an estimated cost of 0 for VF vscale x 4 For instruction: %arrayidx3 = getelementptr inbounds float, ptr %A, i64 %idxprom
-; CHECK-NEXT: LV: Found an estimated cost of 9 for VF vscale x 4 For instruction: store float %conv1, ptr %arrayidx3, align 4
-; CHECK-NEXT: LV: Found an estimated cost of 1 for VF vscale x 4 For instruction: %cmp = icmp ugt i64 %indvars.iv, 1
-; CHECK-NEXT: LV: Found an estimated cost of 1 for VF vscale x 4 For instruction: %indvars.iv.next = add nsw i64 %indvars.iv, -1
-; CHECK-NEXT: LV: Found an estimated cost of 0 for VF vscale x 4 For instruction: br i1 %cmp, label %for.body, label %for.cond.cleanup.loopexit, !llvm.loop !0
-; CHECK-NEXT: LV: Using user VF vscale x 4.
-; CHECK-NEXT: Creating VPBasicBlock for for.body
-; CHECK-NEXT: VPlan 'Plain CFG
-; CHECK-NEXT: for UF>=1' {
-; CHECK-EMPTY:
-; CHECK-NEXT: ir-bb<for.body.preheader>:
-; CHECK-NEXT: IR %0 = zext i32 %n to i64
-; CHECK-NEXT: Successor(s): for.body
-; CHECK-EMPTY:
-; CHECK-NEXT: for.body:
-; CHECK-NEXT: WIDEN-PHI ir<%indvars.iv> = phi [ ir<%indvars.iv.next>, for.body ], [ ir<%0>, ir-bb<for.body.preheader> ]
-; CHECK-NEXT: WIDEN-PHI ir<%i.0.in8> = phi [ ir<%i.0>, for.body ], [ ir<%n>, ir-bb<for.body.preheader> ]
-; CHECK-NEXT: EMIT ir<%i.0> = add ir<%i.0.in8>, ir<-1>
-; CHECK-NEXT: EMIT-SCALAR ir<%idxprom> = zext ir<%i.0>
-; CHECK-NEXT: EMIT ir<%arrayidx> = getelementptr ir<%B>, ir<%idxprom>
-; CHECK-NEXT: EMIT ir<%1> = load ir<%arrayidx>
-; CHECK-NEXT: EMIT ir<%conv1> = fadd ir<%1>, ir<1.000000e+00>
-; CHECK-NEXT: EMIT ir<%arrayidx3> = getelementptr ir<%A>, ir<%idxprom>
-; CHECK-NEXT: EMIT store ir<%conv1>, ir<%arrayidx3>
-; CHECK-NEXT: EMIT ir<%cmp> = icmp ir<%indvars.iv>, ir<1>
-; CHECK-NEXT: EMIT ir<%indvars.iv.next> = add ir<%indvars.iv>, ir<-1>
-; CHECK-NEXT: EMIT branch-on-cond ir<%cmp>
-; CHECK-NEXT: Successor(s): for.body, ir-bb<for.cond.cleanup.loopexit>
-; CHECK-EMPTY:
-; CHECK-NEXT: ir-bb<for.cond.cleanup.loopexit>:
-; CHECK-NEXT: No successors
-; CHECK-NEXT: }
-; CHECK-NEXT: LV: Loop does not require scalar epilogue
-; CHECK-NEXT: LV: Scalarizing: %i.0 = add nsw i32 %i.0.in8, -1
-; CHECK-NEXT: LV: Scalarizing: %idxprom = zext i32 %i.0 to i64
-; CHECK-NEXT: LV: Scalarizing: %arrayidx = getelementptr inbounds float, ptr %B, i64 %idxprom
-; CHECK-NEXT: LV: Scalarizing: %arrayidx3 = getelementptr inbounds float, ptr %A, i64 %idxprom
-; CHECK-NEXT: LV: Scalarizing: %cmp = icmp ugt i64 %indvars.iv, 1
-; CHECK-NEXT: LV: Scalarizing: %indvars.iv.next = add nsw i64 %indvars.iv, -1
-; CHECK-NEXT: VPlan 'Initial VPlan for VF={vscale x 4},UF>=1' {
-; CHECK-NEXT: Live-in vp<%0> = VF
-; CHECK-NEXT: Live-in vp<%1> = VF * UF
-; CHECK-NEXT: Live-in vp<%2> = vector-trip-count
-; CHECK-NEXT: vp<%3> = original trip-count
-; CHECK-EMPTY:
-; CHECK-NEXT: ir-bb<for.body.preheader>:
-; CHECK-NEXT: IR %0 = zext i32 %n to i64
-; CHECK-NEXT: EMIT vp<%3> = EXPAND SCEV (zext i32 %n to i64)
-; CHECK-NEXT: Successor(s): scalar.ph, vector.ph
-; CHECK-EMPTY:
-; CHECK-NEXT: vector.ph:
-; CHECK-NEXT: vp<%4> = DERIVED-IV ir<%0> + vp<%2> * ir<-1>
-; CHECK-NEXT: vp<%5> = DERIVED-IV ir<%n> + vp<%2> * ir<-1>
-; CHECK-NEXT: Successor(s): vector loop
-; CHECK-EMPTY:
-; CHECK-NEXT: <x1> vector loop: {
-; CHECK-NEXT: vector.body:
-; CHECK-NEXT: EMIT vp<%6> = CANONICAL-INDUCTION ir<0>, vp<%index.next>
-; CHECK-NEXT: vp<%7> = DERIVED-IV ir<%n> + vp<%6> * ir<-1>
-; CHECK-NEXT: vp<%8> = SCALAR-STEPS vp<%7>, ir<-1>, vp<%0>
-; CHECK-NEXT: CLONE ir<%i.0> = add nsw vp<%8>, ir<-1>
-; CHECK-NEXT: CLONE ir<%idxprom> = zext ir<%i.0>
-; CHECK-NEXT: CLONE ir<%arrayidx> = getelementptr inbounds ir<%B>, ir<%idxprom>
-; CHECK-NEXT: vp<%9> = vector-end-pointer inbounds ir<%arrayidx>, vp<%0>
-; CHECK-NEXT: WIDEN ir<%1> = load vp<%9>
-; CHECK-NEXT: WIDEN ir<%conv1> = fadd ir<%1>, ir<1.000000e+00>
-; CHECK-NEXT: CLONE ir<%arrayidx3> = getelementptr inbounds ir<%A>, ir<%idxprom>
-; CHECK-NEXT: vp<%10> = vector-end-pointer inbounds ir<%arrayidx3>, vp<%0>
-; CHECK-NEXT: WIDEN store vp<%10>, ir<%conv1>
-; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<%6>, vp<%1>
-; CHECK-NEXT: EMIT branch-on-count vp<%index.next>, vp<%2>
-; CHECK-NEXT: No successors
-; CHECK-NEXT: }
-; CHECK-NEXT: Successor(s): middle.block
-; CHECK-EMPTY:
-; CHECK-NEXT: middle.block:
-; CHECK-NEXT: EMIT vp<%cmp.n> = icmp eq vp<%3>, vp<%2>
-; CHECK-NEXT: EMIT branch-on-cond vp<%cmp.n>
-; CHECK-NEXT: Successor(s): ir-bb<for.cond.cleanup.loopexit>, scalar.ph
-; CHECK-EMPTY:
-; CHECK-NEXT: ir-bb<for.cond.cleanup.loopexit>:
-; CHECK-NEXT: No successors
-; CHECK-EMPTY:
-; CHECK-NEXT: scalar.ph:
-; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val> = phi [ vp<%4>, middle.block ], [ ir<%0>, ir-bb<for.body.preheader> ]
-; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val>.1 = phi [ vp<%5>, middle.block ], [ ir<%n>, ir-bb<for.body.preheader> ]
-; CHECK-NEXT: Successor(s): ir-bb<for.body>
-; CHECK-EMPTY:
-; CHECK-NEXT: ir-bb<for.body>:
-; CHECK-NEXT: IR %indvars.iv = phi i64 [ %0, %for.body.preheader ], [ %indvars.iv.next, %for.body ] (extra operand: vp<%bc.resume.val> from scalar.ph)
-; CHECK-NEXT: IR %i.0.in8 = phi i32 [ %n, %for.body.preheader ], [ %i.0, %for.body ] (extra operand: vp<%bc.resume.val>.1 from scalar.ph)
-; CHECK-NEXT: IR %i.0 = add nsw i32 %i.0.in8, -1
-; CHECK-NEXT: IR %idxprom = zext i32 %i.0 to i64
-; CHECK-NEXT: IR %arrayidx = getelementptr inbounds float, ptr %B, i64 %idxprom
-; CHECK-NEXT: IR %1 = load float, ptr %arrayidx, align 4
-; CHECK-NEXT: IR %conv1 = fadd float %1, 1.000000e+00
-; CHECK-NEXT: IR %arrayidx3 = getelementptr inbounds float, ptr %A, i64 %idxprom
-; CHECK-NEXT: IR store float %conv1, ptr %arrayidx3, align 4
-; CHECK-NEXT: IR %cmp = icmp ugt i64 %indvars.iv, 1
-; CHECK-NEXT: IR %indvars.iv.next = add nsw i64 %indvars.iv, -1
-; CHECK-NEXT: No successors
-; CHECK-NEXT: }
-; CHECK-NEXT: LV: Found an estimated cost of 0 for VF vscale x 4 For instruction: %indvars.iv = phi i64 [ %0, %for.body.preheader ], [ %indvars.iv.next, %for.body ]
-; CHECK-NEXT: LV: Found an estimated cost of 0 for VF vscale x 4 For instruction: %i.0.in8 = phi i32 [ %n, %for.body.preheader ], [ %i.0, %for.body ]
-; CHECK-NEXT: LV: Found an estimated cost of 1 for VF vscale x 4 For instruction: %i.0 = add nsw i32 %i.0.in8, -1
-; CHECK-NEXT: LV: Found an estimated cost of 1 for VF vscale x 4 For instruction: %idxprom = zext i32 %i.0 to i64
-; CHECK-NEXT: LV: Found an estimated cost of 0 for VF vscale x 4 For instruction: %arrayidx = getelementptr inbounds float, ptr %B, i64 %idxprom
-; CHECK-NEXT: LV: Found an estimated cost of 9 for VF vscale x 4 For instruction: %1 = load float, ptr %arrayidx, align 4
-; CHECK-NEXT: LV: Found an estimated cost of 4 for VF vscale x 4 For instruction: %conv1 = fadd float %1, 1.000000e+00
-; CHECK-NEXT: LV: Found an estimated cost of 0 for VF vscale x 4 For instruction: %arrayidx3 = getelementptr inbounds float, ptr %A, i64 %idxprom
-; CHECK-NEXT: LV: Found an estimated cost of 9 for VF vscale x 4 For instruction: store float %conv1, ptr %arrayidx3, align 4
-; CHECK-NEXT: LV: Found an estimated cost of 1 for VF vscale x 4 For instruction: %cmp = icmp ugt i64 %indvars.iv, 1
-; CHECK-NEXT: LV: Found an estimated cost of 1 for VF vscale x 4 For instruction: %indvars.iv.next = add nsw i64 %indvars.iv, -1
-; CHECK-NEXT: LV: Found an estimated cost of 0 for VF vscale x 4 For instruction: br i1 %cmp, label %for.body, label %for.cond.cleanup.loopexit, !llvm.loop !0
-; CHECK-NEXT: LV(REG): Calculating max register usage:
-; CHECK-NEXT: LV(REG): At #0 Interval # 0
-; CHECK-NEXT: LV(REG): At #1 Interval # 1
-; CHECK-NEXT: LV(REG): At #2 Interval # 2
-; CHECK-NEXT: LV(REG): At #3 Interval # 2
-; CHECK-NEXT: LV(REG): At #4 Interval # 2
-; CHECK-NEXT: LV(REG): At #5 Interval # 2
-; CHECK-NEXT: LV(REG): At #6 Interval # 3
-; CHECK-NEXT: LV(REG): At #7 Interval # 3
-; CHECK-NEXT: LV(REG): At #8 Interval # 3
-; CHECK-NEXT: LV(REG): At #9 Interval # 3
-; CHECK-NEXT: LV(REG): At #10 Interval # 3
-; CHECK-NEXT: LV(REG): At #11 Interval # 3
-; CHECK-NEXT: LV(REG): At #12 Interval # 2
-; CHECK-NEXT: LV(REG): At #13 Interval # 2
-; CHECK-NEXT: LV(REG): VF = vscale x 4
-; CHECK-NEXT: LV(REG): Found max usage: 2 item
-; CHECK-NEXT: LV(REG): RegisterClass: RISCV::GPRRC, 3 registers
-; CHECK-NEXT: LV(REG): RegisterClass: RISCV::VRRC, 2 registers
-; CHECK-NEXT: LV(REG): Found invariant usage: 1 item
-; CHECK-NEXT: LV(REG): RegisterClass: RISCV::GPRRC, 1 registers
-; CHECK-NEXT: LV: The target has 31 registers of RISCV::GPRRC register class
-; CHECK-NEXT: LV: The target has 32 registers of RISCV::VRRC register class
-; CHECK-NEXT: LV: Loop does not require scalar epilogue
-; CHECK-NEXT: LV: Loop cost is 26
-; CHECK-NEXT: LV: IC is 1
-; CHECK-NEXT: LV: VF is vscale x 4
-; CHECK-NEXT: LV: Not Interleaving.
-; CHECK-NEXT: LV: Interleaving is not beneficial.
-; CHECK-NEXT: LV: Found a vectorizable loop (vscale x 4) in <stdin>
-; CHECK-NEXT: LEV: Epilogue vectorization is not profitable for this loop
-; CHECK-NEXT: LV: Loop does not require scalar epilogue
-; CHECK-NEXT: LV: Loop does not require scalar epilogue
-; CHECK-NEXT: Executing best plan with VF=vscale x 4, UF=1
-; CHECK-NEXT: VPlan 'Final VPlan for VF={vscale x 4},UF={1}' {
-; CHECK-NEXT: Live-in ir<%18> = VF
-; CHECK-NEXT: Live-in ir<%18>.1 = VF * UF
-; CHECK-NEXT: Live-in ir<%n.vec> = vector-trip-count
-; CHECK-NEXT: Live-in ir<%0> = original trip-count
-; CHECK-EMPTY:
-; CHECK-NEXT: ir-bb<for.body.preheader>:
-; CHECK-NEXT: IR %0 = zext i32 %n to i64
-; CHECK-NEXT: Successor(s): ir-bb<scalar.ph>, ir-bb<vector.scevcheck>
-; CHECK-EMPTY:
-; CHECK-NEXT: ir-bb<vector.scevcheck>:
-; CHECK-NEXT: IR %3 = add nsw i64 %0, -1
-; CHECK-NEXT: IR %4 = add i32 %n, -1
-; CHECK-NEXT: IR %5 = trunc i64 %3 to i32
-; CHECK-NEXT: IR %mul = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 1, i32 %5)
-; CHECK-NEXT: IR %mul.result = extractvalue { i32, i1 } %mul, 0
-; CHECK-NEXT: IR %mul.overflow = extractvalue { i32, i1 } %mul, 1
-; CHECK-NEXT: IR %6 = sub i32 %4, %mul.result
-; CHECK-NEXT: IR %7 = icmp ugt i32 %6, %4
-; CHECK-NEXT: IR %8 = or i1 %7, %mul.overflow
-; CHECK-NEXT: IR %9 = icmp ugt i64 %3, 4294967295
-; CHECK-NEXT: IR %10 = or i1 %8, %9
-; CHECK-NEXT: EMIT branch-on-cond ir<%10>
-; CHECK-NEXT: Successor(s): ir-bb<scalar.ph>, ir-bb<vector.memcheck>
-; CHECK-EMPTY:
-; CHECK-NEXT: ir-bb<vector.memcheck>:
-; CHECK-NEXT: IR %11 = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: IR %12 = mul nuw i64 %11, 4
-; CHECK-NEXT: IR %13 = mul i64 %12, 4
-; CHECK-NEXT: IR %14 = sub i64 %B1, %A2
-; CHECK-NEXT: IR %diff.check = icmp ult i64 %14, %13
-; CHECK-NEXT: EMIT branch-on-cond ir<%diff.check>
-; CHECK-NEXT: Successor(s): ir-bb<scalar.ph>, ir-bb<vector.ph>
-; CHECK-EMPTY:
-; CHECK-NEXT: ir-bb<vector.ph>:
-; CHECK-NEXT: IR %15 = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: IR %16 = mul nuw i64 %15, 4
-; CHECK-NEXT: IR %n.mod.vf = urem i64 %0, %16
-; CHECK-NEXT: IR %n.vec = sub i64 %0, %n.mod.vf
-; CHECK-NEXT: IR %17 = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: IR %18 = mul nuw i64 %17, 4
-; CHECK-NEXT: vp<%3> = DERIVED-IV ir<%0> + ir<%n.vec> * ir<-1>
-; CHECK-NEXT: vp<%4> = DERIVED-IV ir<%n> + ir<%n.vec> * ir<-1>
-; CHECK-NEXT: Successor(s): vector.body
-; CHECK-EMPTY:
-; CHECK-NEXT: vector.body:
-; CHECK-NEXT: EMIT-SCALAR vp<%index> = phi [ ir<0>, ir-bb<vector.ph> ], [ vp<%index.next>, vector.body ]
-; CHECK-NEXT: vp<%5> = DERIVED-IV ir<%n> + vp<%index> * ir<-1>
-; CHECK-NEXT: CLONE ir<%i.0> = add nsw vp<%5>, ir<-1>
-; CHECK-NEXT: CLONE ir<%idxprom> = zext ir<%i.0>
-; CHECK-NEXT: CLONE ir<%arrayidx> = getelementptr inbounds ir<%B>, ir<%idxprom>
-; CHECK-NEXT: vp<%6> = vector-end-pointer inbounds ir<%arrayidx>, ir<%18>
-; CHECK-NEXT: WIDEN ir<%19> = load vp<%6>
-; CHECK-NEXT: WIDEN ir<%conv1> = fadd ir<%19>, ir<1.000000e+00>
-; CHECK-NEXT: CLONE ir<%arrayidx3> = getelementptr inbounds ir<%A>, ir<%idxprom>
-; CHECK-NEXT: vp<%7> = vector-end-pointer inbounds ir<%arrayidx3>, ir<%18>
-; CHECK-NEXT: WIDEN store vp<%7>, ir<%conv1>
-; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<%index>, ir<%18>.1
-; CHECK-NEXT: EMIT branch-on-count vp<%index.next>, ir<%n.vec>
-; CHECK-NEXT: Successor(s): middle.block, vector.body
-; CHECK-EMPTY:
-; CHECK-NEXT: middle.block:
-; CHECK-NEXT: EMIT vp<%cmp.n> = icmp eq ir<%0>, ir<%n.vec>
-; CHECK-NEXT: EMIT branch-on-cond vp<%cmp.n>
-; CHECK-NEXT: Successor(s): ir-bb<for.cond.cleanup.loopexit>, ir-bb<scalar.ph>
-; CHECK-EMPTY:
-; CHECK-NEXT: ir-bb<for.cond.cleanup.loopexit>:
-; CHECK-NEXT: No successors
-; CHECK-EMPTY:
-; CHECK-NEXT: ir-bb<scalar.ph>:
-; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val> = phi [ vp<%3>, middle.block ], [ ir<%0>, ir-bb<for.body.preheader> ], [ ir<%0>, ir-bb<vector.scevcheck> ], [ ir<%0>, ir-bb<vector.memcheck> ]
-; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val>.1 = phi [ vp<%4>, middle.block ], [ ir<%n>, ir-bb<for.body.preheader> ], [ ir<%n>, ir-bb<vector.scevcheck> ], [ ir<%n>, ir-bb<vector.memcheck> ]
-; CHECK-NEXT: Successor(s): ir-bb<for.body>
-; CHECK-EMPTY:
-; CHECK-NEXT: ir-bb<for.body>:
-; CHECK-NEXT: IR %indvars.iv = phi i64 [ %0, %scalar.ph ], [ %indvars.iv.next, %for.body ] (extra operand: vp<%bc.resume.val> from ir-bb<scalar.ph>)
-; CHECK-NEXT: IR %i.0.in8 = phi i32 [ %n, %scalar.ph ], [ %i.0, %for.body ] (extra operand: vp<%bc.resume.val>.1 from ir-bb<scalar.ph>)
-; CHECK-NEXT: IR %i.0 = add nsw i32 %i.0.in8, -1
-; CHECK-NEXT: IR %idxprom = zext i32 %i.0 to i64
-; CHECK-NEXT: IR %arrayidx = getelementptr inbounds float, ptr %B, i64 %idxprom
-; CHECK-NEXT: IR %19 = load float, ptr %arrayidx, align 4
-; CHECK-NEXT: IR %conv1 = fadd float %19, 1.000000e+00
-; CHECK-NEXT: IR %arrayidx3 = getelementptr inbounds float, ptr %A, i64 %idxprom
-; CHECK-NEXT: IR store float %conv1, ptr %arrayidx3, align 4
-; CHECK-NEXT: IR %cmp = icmp ugt i64 %indvars.iv, 1
-; CHECK-NEXT: IR %indvars.iv.next = add nsw i64 %indvars.iv, -1
-; CHECK-NEXT: No successors
-; CHECK-NEXT: }
-; CHECK-NEXT: LV: vectorizing VPBB: ir-bb<for.body.preheader> in BB: for.body.preheader
-; CHECK-NEXT: LV: filled BB:
-; CHECK-NEXT: for.body.preheader: ; preds = %entry
-; CHECK-NEXT: %0 = zext i32 %n to i64
-; CHECK-NEXT: %1 = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: %2 = mul nuw i64 %1, 4
-; CHECK-NEXT: %min.iters.check = icmp ult i64 %0, %2
-; CHECK-NEXT: br i1 %min.iters.check, label %scalar.ph, label %vector.ph
-; CHECK-NEXT: LV: vectorizing VPBB: ir-bb<vector.scevcheck> in BB: vector.scevcheck
-; CHECK-NEXT: LV: filled BB:
-; CHECK-NEXT: vector.scevcheck: ; No predecessors!
-; CHECK-NEXT: %3 = add nsw i64 %0, -1
-; CHECK-NEXT: %4 = add i32 %n, -1
-; CHECK-NEXT: %5 = trunc i64 %3 to i32
-; CHECK-NEXT: %mul = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 1, i32 %5)
-; CHECK-NEXT: %mul.result = extractvalue { i32, i1 } %mul, 0
-; CHECK-NEXT: %mul.overflow = extractvalue { i32, i1 } %mul, 1
-; CHECK-NEXT: %6 = sub i32 %4, %mul.result
-; CHECK-NEXT: %7 = icmp ugt i32 %6, %4
-; CHECK-NEXT: %8 = or i1 %7, %mul.overflow
-; CHECK-NEXT: %9 = icmp ugt i64 %3, 4294967295
-; CHECK-NEXT: %10 = or i1 %8, %9
-; CHECK-NEXT: br i1 %10, <null operand!>, <null operand!>
-; CHECK-NEXT: LV: draw edge from for.body.preheader
-; CHECK-NEXT: LV: vectorizing VPBB: ir-bb<vector.memcheck> in BB: vector.memcheck
-; CHECK-NEXT: LV: filled BB:
-; CHECK-NEXT: vector.memcheck: ; No predecessors!
-; CHECK-NEXT: %11 = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: %12 = mul nuw i64 %11, 4
-; CHECK-NEXT: %13 = mul i64 %12, 4
-; CHECK-NEXT: %14 = sub i64 %B1, %A2
-; CHECK-NEXT: %diff.check = icmp ult i64 %14, %13
-; CHECK-NEXT: br i1 %diff.check, <null operand!>, <null operand!>
-; CHECK-NEXT: LV: draw edge from vector.scevcheck
-; CHECK-NEXT: LV: vectorizing VPBB: ir-bb<vector.ph> in BB: vector.ph
-; CHECK-NEXT: LV: filled BB:
-; CHECK-NEXT: vector.ph: ; No predecessors!
-; CHECK-NEXT: %15 = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: %16 = mul nuw i64 %15, 4
-; CHECK-NEXT: %n.mod.vf = urem i64 %0, %16
-; CHECK-NEXT: %n.vec = sub i64 %0, %n.mod.vf
-; CHECK-NEXT: %17 = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: %18 = mul nuw i64 %17, 4
-; CHECK-NEXT: %19 = sub i64 %0, %n.vec
-; CHECK-NEXT: %.cast = trunc i64 %n.vec to i32
-; CHECK-NEXT: %20 = sub i32 %n, %.cast
-; CHECK-NEXT: br
-; CHECK-NEXT: LV: draw edge from vector.memcheck
-; CHECK-NEXT: LV: created vector.body
-; CHECK-NEXT: LV: draw edge from vector.ph
-; CHECK-NEXT: LV: vectorizing VPBB: vector.body in BB: vector.body
-; CHECK-NEXT: LV: filled BB:
-; CHECK-NEXT: vector.body: ; preds = %vector.body, %vector.ph
-; CHECK-NEXT: %index = phi i64 [ 0, %vector.ph ]
-; CHECK-NEXT: %.cast3 = trunc i64 %index to i32
-; CHECK-NEXT: %offset.idx = sub i32 %n, %.cast3
-; CHECK-NEXT: %21 = add nsw i32 %offset.idx, -1
-; CHECK-NEXT: %22 = zext i32 %21 to i64
-; CHECK-NEXT: %23 = getelementptr inbounds float, ptr %B, i64 %22
-; CHECK-NEXT: %24 = mul i64 0, %18
-; CHECK-NEXT: %25 = sub i64 %18, 1
-; CHECK-NEXT: %26 = mul i64 -1, %25
-; CHECK-NEXT: %27 = getelementptr inbounds float, ptr %23, i64 %24
-; CHECK-NEXT: %28 = getelementptr inbounds float, ptr %27, i64 %26
-; CHECK-NEXT: %wide.load = load <vscale x 4 x float>, ptr %28, align 4
-; CHECK-NEXT: %reverse = call <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float> %wide.load)
-; CHECK-NEXT: %29 = fadd <vscale x 4 x float> %reverse, splat (float 1.000000e+00)
-; CHECK-NEXT: %30 = getelementptr inbounds float, ptr %A, i64 %22
-; CHECK-NEXT: %31 = mul i64 0, %18
-; CHECK-NEXT: %32 = sub i64 %18, 1
-; CHECK-NEXT: %33 = mul i64 -1, %32
-; CHECK-NEXT: %34 = getelementptr inbounds float, ptr %30, i64 %31
-; CHECK-NEXT: %35 = getelementptr inbounds float, ptr %34, i64 %33
-; CHECK-NEXT: %reverse4 = call <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float> %29)
-; CHECK-NEXT: store <vscale x 4 x float> %reverse4, ptr %35, align 4
-; CHECK-NEXT: %index.next = add nuw i64 %index, %18
-; CHECK-NEXT: %36 = icmp eq i64 %index.next, %n.vec
-; CHECK-NEXT: br i1 %36, <null operand!>, label %vector.body
-; CHECK-NEXT: LV: created middle.block
-; CHECK-NEXT: LV: draw edge from vector.body
-; CHECK-NEXT: LV: vectorizing VPBB: middle.block in BB: middle.block
-; CHECK-NEXT: LV: filled BB:
-; CHECK-NEXT: middle.block: ; preds = %vector.body
-; CHECK-NEXT: %cmp.n = icmp eq i64 %0, %n.vec
-; CHECK-NEXT: br i1 %cmp.n, <null operand!>, <null operand!>
-; CHECK-NEXT: LV: vectorizing VPBB: ir-bb<for.cond.cleanup.loopexit> in BB: for.cond.cleanup.loopexit
-; CHECK-NEXT: LV: filled BB:
-; CHECK-NEXT: for.cond.cleanup.loopexit: ; preds = %for.body
-; CHECK-NEXT: br label %for.cond.cleanup
-; CHECK-NEXT: LV: draw edge from middle.block
-; CHECK-NEXT: LV: vectorizing VPBB: ir-bb<scalar.ph> in BB: scalar.ph
-; CHECK-NEXT: LV: filled BB:
-; CHECK-NEXT: scalar.ph: ; preds = %for.body.preheader
-; CHECK-NEXT: %bc.resume.val = phi i64 [ %19, %middle.block ], [ %0, %for.body.preheader ], [ %0, %vector.scevcheck ], [ %0, %vector.memcheck ]
-; CHECK-NEXT: %bc.resume.val5 = phi i32 [ %20, %middle.block ], [ %n, %for.body.preheader ], [ %n, %vector.scevcheck ], [ %n, %vector.memcheck ]
-; CHECK-NEXT: br label %for.body
-; CHECK-NEXT: LV: draw edge from middle.block
-; CHECK-NEXT: LV: draw edge from for.body.preheader
-; CHECK-NEXT: LV: draw edge from vector.scevcheck
-; CHECK-NEXT: LV: draw edge from vector.memcheck
-; CHECK-NEXT: LV: vectorizing VPBB: ir-bb<for.body> in BB: for.body
-; CHECK-NEXT: LV: filled BB:
-; CHECK-NEXT: for.body: ; preds = %for.body, %scalar.ph
-; CHECK-NEXT: %indvars.iv = phi i64 [ %bc.resume.val, %scalar.ph ], [ %indvars.iv.next, %for.body ]
-; CHECK-NEXT: %i.0.in8 = phi i32 [ %bc.resume.val5, %scalar.ph ], [ %i.0, %for.body ]
-; CHECK-NEXT: %i.0 = add nsw i32 %i.0.in8, -1
-; CHECK-NEXT: %idxprom = zext i32 %i.0 to i64
-; CHECK-NEXT: %arrayidx = getelementptr inbounds float, ptr %B, i64 %idxprom
-; CHECK-NEXT: %37 = load float, ptr %arrayidx, align 4
-; CHECK-NEXT: %conv1 = fadd float %37, 1.000000e+00
-; CHECK-NEXT: %arrayidx3 = getelementptr inbounds float, ptr %A, i64 %idxprom
-; CHECK-NEXT: store float %conv1, ptr %arrayidx3, align 4
-; CHECK-NEXT: %cmp = icmp ugt i64 %indvars.iv, 1
-; CHECK-NEXT: %indvars.iv.next = add nsw i64 %indvars.iv, -1
-; CHECK-NEXT: br i1 %cmp, label %for.body, label %for.cond.cleanup.loopexit, !llvm.loop !0
-; CHECK-NEXT: LV: draw edge from scalar.ph
-; CHECK-NEXT: LV: Interleaving disabled by the pass manager
-; CHECK-NEXT: LV: Vectorizing: innermost loop.
+; RV64-LABEL: define void @vector_reverse_f32(
+; RV64-SAME: ptr noundef writeonly captures(none) [[A:%.*]], ptr noundef readonly captures(none) [[B:%.*]], i32 noundef signext [[N:%.*]]) #[[ATTR0]] {
+; RV64-NEXT: [[ENTRY:.*:]]
+; RV64-NEXT: [[A2:%.*]] = ptrtoint ptr [[A]] to i64
+; RV64-NEXT: [[B1:%.*]] = ptrtoint ptr [[B]] to i64
+; RV64-NEXT: [[CMP7:%.*]] = icmp sgt i32 [[N]], 0
+; RV64-NEXT: br i1 [[CMP7]], label %[[FOR_BODY_PREHEADER:.*]], label %[[FOR_COND_CLEANUP:.*]]
+; RV64: [[FOR_BODY_PREHEADER]]:
+; RV64-NEXT: [[TMP0:%.*]] = zext i32 [[N]] to i64
+; RV64-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
+; RV64-NEXT: [[TMP2:%.*]] = mul nuw i64 [[TMP1]], 4
+; RV64-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP0]], [[TMP2]]
+; RV64-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_SCEVCHECK:.*]]
+; RV64: [[VECTOR_SCEVCHECK]]:
+; RV64-NEXT: [[TMP3:%.*]] = add nsw i64 [[TMP0]], -1
+; RV64-NEXT: [[TMP4:%.*]] = add i32 [[N]], -1
+; RV64-NEXT: [[TMP5:%.*]] = trunc i64 [[TMP3]] to i32
+; RV64-NEXT: [[MUL:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 1, i32 [[TMP5]])
+; RV64-NEXT: [[MUL_RESULT:%.*]] = extractvalue { i32, i1 } [[MUL]], 0
+; RV64-NEXT: [[MUL_OVERFLOW:%.*]] = extractvalue { i32, i1 } [[MUL]], 1
+; RV64-NEXT: [[TMP6:%.*]] = sub i32 [[TMP4]], [[MUL_RESULT]]
+; RV64-NEXT: [[TMP7:%.*]] = icmp ugt i32 [[TMP6]], [[TMP4]]
+; RV64-NEXT: [[TMP8:%.*]] = or i1 [[TMP7]], [[MUL_OVERFLOW]]
+; RV64-NEXT: [[TMP9:%.*]] = icmp ugt i64 [[TMP3]], 4294967295
+; RV64-NEXT: [[TMP10:%.*]] = or i1 [[TMP8]], [[TMP9]]
+; RV64-NEXT: br i1 [[TMP10]], label %[[SCALAR_PH]], label %[[VECTOR_MEMCHECK:.*]]
+; RV64: [[VECTOR_MEMCHECK]]:
+; RV64-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64()
+; RV64-NEXT: [[TMP12:%.*]] = mul nuw i64 [[TMP11]], 4
+; RV64-NEXT: [[TMP13:%.*]] = mul i64 [[TMP12]], 4
+; RV64-NEXT: [[TMP14:%.*]] = sub i64 [[B1]], [[A2]]
+; RV64-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP14]], [[TMP13]]
+; RV64-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
+; RV64: [[VECTOR_PH]]:
+; RV64-NEXT: [[TMP15:%.*]] = call i64 @llvm.vscale.i64()
+; RV64-NEXT: [[TMP16:%.*]] = mul nuw i64 [[TMP15]], 4
+; RV64-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP0]], [[TMP16]]
+; RV64-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP0]], [[N_MOD_VF]]
+; RV64-NEXT: [[TMP17:%.*]] = call i64 @llvm.vscale.i64()
+; RV64-NEXT: [[TMP18:%.*]] = mul nuw i64 [[TMP17]], 4
+; RV64-NEXT: [[TMP19:%.*]] = sub i64 [[TMP0]], [[N_VEC]]
+; RV64-NEXT: [[DOTCAST:%.*]] = trunc i64 [[N_VEC]] to i32
+; RV64-NEXT: [[TMP20:%.*]] = sub i32 [[N]], [[DOTCAST]]
+; RV64-NEXT: br label %[[VECTOR_BODY:.*]]
+; RV64: [[VECTOR_BODY]]:
+; RV64-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; RV64-NEXT: [[DOTCAST3:%.*]] = trunc i64 [[INDEX]] to i32
+; RV64-NEXT: [[OFFSET_IDX:%.*]] = sub i32 [[N]], [[DOTCAST3]]
+; RV64-NEXT: [[TMP21:%.*]] = add nsw i32 [[OFFSET_IDX]], -1
+; RV64-NEXT: [[TMP22:%.*]] = zext i32 [[TMP21]] to i64
+; RV64-NEXT: [[TMP23:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[TMP22]]
+; RV64-NEXT: [[TMP24:%.*]] = mul i64 0, [[TMP18]]
+; RV64-NEXT: [[TMP25:%.*]] = sub i64 [[TMP18]], 1
+; RV64-NEXT: [[TMP26:%.*]] = mul i64 -1, [[TMP25]]
+; RV64-NEXT: [[TMP27:%.*]] = getelementptr inbounds float, ptr [[TMP23]], i64 [[TMP24]]
+; RV64-NEXT: [[TMP28:%.*]] = getelementptr inbounds float, ptr [[TMP27]], i64 [[TMP26]]
+; RV64-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[TMP28]], align 4
+; RV64-NEXT: [[REVERSE:%.*]] = call <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float> [[WIDE_LOAD]])
+; RV64-NEXT: [[TMP29:%.*]] = fadd <vscale x 4 x float> [[REVERSE]], splat (float 1.000000e+00)
+; RV64-NEXT: [[TMP30:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[TMP22]]
+; RV64-NEXT: [[TMP31:%.*]] = mul i64 0, [[TMP18]]
+; RV64-NEXT: [[TMP32:%.*]] = sub i64 [[TMP18]], 1
+; RV64-NEXT: [[TMP33:%.*]] = mul i64 -1, [[TMP32]]
+; RV64-NEXT: [[TMP34:%.*]] = getelementptr inbounds float, ptr [[TMP30]], i64 [[TMP31]]
+; RV64-NEXT: [[TMP35:%.*]] = getelementptr inbounds float, ptr [[TMP34]], i64 [[TMP33]]
+; RV64-NEXT: [[REVERSE4:%.*]] = call <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float> [[TMP29]])
+; RV64-NEXT: store <vscale x 4 x float> [[REVERSE4]], ptr [[TMP35]], align 4
+; RV64-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP18]]
+; RV64-NEXT: [[TMP36:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; RV64-NEXT: br i1 [[TMP36]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
+; RV64: [[MIDDLE_BLOCK]]:
+; RV64-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP0]], [[N_VEC]]
+; RV64-NEXT: br i1 [[CMP_N]], label %[[FOR_COND_CLEANUP_LOOPEXIT:.*]], label %[[SCALAR_PH]]
+; RV64: [[SCALAR_PH]]:
+; RV64-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP19]], %[[MIDDLE_BLOCK]] ], [ [[TMP0]], %[[FOR_BODY_PREHEADER]] ], [ [[TMP0]], %[[VECTOR_SCEVCHECK]] ], [ [[TMP0]], %[[VECTOR_MEMCHECK]] ]
+; RV64-NEXT: [[BC_RESUME_VAL5:%.*]] = phi i32 [ [[TMP20]], %[[MIDDLE_BLOCK]] ], [ [[N]], %[[FOR_BODY_PREHEADER]] ], [ [[N]], %[[VECTOR_SCEVCHECK]] ], [ [[N]], %[[VECTOR_MEMCHECK]] ]
+; RV64-NEXT: br label %[[FOR_BODY:.*]]
+; RV64: [[FOR_COND_CLEANUP_LOOPEXIT]]:
+; RV64-NEXT: br label %[[FOR_COND_CLEANUP]]
+; RV64: [[FOR_COND_CLEANUP]]:
+; RV64-NEXT: ret void
+; RV64: [[FOR_BODY]]:
+;
+; RV32-LABEL: define void @vector_reverse_f32(
+; RV32-SAME: ptr noundef writeonly captures(none) [[A:%.*]], ptr noundef readonly captures(none) [[B:%.*]], i32 noundef signext [[N:%.*]]) #[[ATTR0]] {
+; RV32-NEXT: [[ENTRY:.*:]]
+; RV32-NEXT: [[A2:%.*]] = ptrtoint ptr [[A]] to i32
+; RV32-NEXT: [[B1:%.*]] = ptrtoint ptr [[B]] to i32
+; RV32-NEXT: [[CMP7:%.*]] = icmp sgt i32 [[N]], 0
+; RV32-NEXT: br i1 [[CMP7]], label %[[FOR_BODY_PREHEADER:.*]], label %[[FOR_COND_CLEANUP:.*]]
+; RV32: [[FOR_BODY_PREHEADER]]:
+; RV32-NEXT: [[TMP0:%.*]] = zext i32 [[N]] to i64
+; RV32-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
+; RV32-NEXT: [[TMP2:%.*]] = mul nuw i64 [[TMP1]], 4
+; RV32-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP0]], [[TMP2]]
+; RV32-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
+; RV32: [[VECTOR_MEMCHECK]]:
+; RV32-NEXT: [[TMP3:%.*]] = call i32 @llvm.vscale.i32()
+; RV32-NEXT: [[TMP4:%.*]] = mul nuw i32 [[TMP3]], 4
+; RV32-NEXT: [[TMP5:%.*]] = mul i32 [[TMP4]], 4
+; RV32-NEXT: [[TMP6:%.*]] = sub i32 [[B1]], [[A2]]
+; RV32-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i32 [[TMP6]], [[TMP5]]
+; RV32-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
+; RV32: [[VECTOR_PH]]:
+; RV32-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
+; RV32-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
+; RV32-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP0]], [[TMP8]]
+; RV32-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP0]], [[N_MOD_VF]]
+; RV32-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
+; RV32-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4
+; RV32-NEXT: [[TMP11:%.*]] = sub i64 [[TMP0]], [[N_VEC]]
+; RV32-NEXT: [[DOTCAST:%.*]] = trunc i64 [[N_VEC]] to i32
+; RV32-NEXT: [[TMP12:%.*]] = sub i32 [[N]], [[DOTCAST]]
+; RV32-NEXT: br label %[[VECTOR_BODY:.*]]
+; RV32: [[VECTOR_BODY]]:
+; RV32-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; RV32-NEXT: [[DOTCAST3:%.*]] = trunc i64 [[INDEX]] to i32
+; RV32-NEXT: [[OFFSET_IDX:%.*]] = sub i32 [[N]], [[DOTCAST3]]
+; RV32-NEXT: [[TMP13:%.*]] = add nsw i32 [[OFFSET_IDX]], -1
+; RV32-NEXT: [[TMP14:%.*]] = zext i32 [[TMP13]] to i64
+; RV32-NEXT: [[TMP15:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[TMP14]]
+; RV32-NEXT: [[TMP16:%.*]] = trunc i64 [[TMP10]] to i32
+; RV32-NEXT: [[TMP17:%.*]] = mul i32 0, [[TMP16]]
+; RV32-NEXT: [[TMP18:%.*]] = sub i32 [[TMP16]], 1
+; RV32-NEXT: [[TMP19:%.*]] = mul i32 -1, [[TMP18]]
+; RV32-NEXT: [[TMP20:%.*]] = getelementptr inbounds float, ptr [[TMP15]], i32 [[TMP17]]
+; RV32-NEXT: [[TMP21:%.*]] = getelementptr inbounds float, ptr [[TMP20]], i32 [[TMP19]]
+; RV32-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[TMP21]], align 4
+; RV32-NEXT: [[REVERSE:%.*]] = call <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float> [[WIDE_LOAD]])
+; RV32-NEXT: [[TMP22:%.*]] = fadd <vscale x 4 x float> [[REVERSE]], splat (float 1.000000e+00)
+; RV32-NEXT: [[TMP23:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[TMP14]]
+; RV32-NEXT: [[TMP24:%.*]] = trunc i64 [[TMP10]] to i32
+; RV32-NEXT: [[TMP25:%.*]] = mul i32 0, [[TMP24]]
+; RV32-NEXT: [[TMP26:%.*]] = sub i32 [[TMP24]], 1
+; RV32-NEXT: [[TMP27:%.*]] = mul i32 -1, [[TMP26]]
+; RV32-NEXT: [[TMP28:%.*]] = getelementptr inbounds float, ptr [[TMP23]], i32 [[TMP25]]
+; RV32-NEXT: [[TMP29:%.*]] = getelementptr inbounds float, ptr [[TMP28]], i32 [[TMP27]]
+; RV32-NEXT: [[REVERSE4:%.*]] = call <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float> [[TMP22]])
+; RV32-NEXT: store <vscale x 4 x float> [[REVERSE4]], ptr [[TMP29]], align 4
+; RV32-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP10]]
+; RV32-NEXT: [[TMP30:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; RV32-NEXT: br i1 [[TMP30]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
+; RV32: [[MIDDLE_BLOCK]]:
+; RV32-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP0]], [[N_VEC]]
+; RV32-NEXT: br i1 [[CMP_N]], label %[[FOR_COND_CLEANUP_LOOPEXIT:.*]], label %[[SCALAR_PH]]
+; RV32: [[SCALAR_PH]]:
+; RV32-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP11]], %[[MIDDLE_BLOCK]] ], [ [[TMP0]], %[[FOR_BODY_PREHEADER]] ], [ [[TMP0]], %[[VECTOR_MEMCHECK]] ]
+; RV32-NEXT: [[BC_RESUME_VAL5:%.*]] = phi i32 [ [[TMP12]], %[[MIDDLE_BLOCK]] ], [ [[N]], %[[FOR_BODY_PREHEADER]] ], [ [[N]], %[[VECTOR_MEMCHECK]] ]
+; RV32-NEXT: br label %[[FOR_BODY:.*]]
+; RV32: [[FOR_COND_CLEANUP_LOOPEXIT]]:
+; RV32-NEXT: br label %[[FOR_COND_CLEANUP]]
+; RV32: [[FOR_COND_CLEANUP]]:
+; RV32-NEXT: ret void
+; RV32: [[FOR_BODY]]:
+;
+; RV64-UF2-LABEL: define void @vector_reverse_f32(
+; RV64-UF2-SAME: ptr noundef writeonly captures(none) [[A:%.*]], ptr noundef readonly captures(none) [[B:%.*]], i32 noundef signext [[N:%.*]]) #[[ATTR0]] {
+; RV64-UF2-NEXT: [[ENTRY:.*:]]
+; RV64-UF2-NEXT: [[A2:%.*]] = ptrtoint ptr [[A]] to i64
+; RV64-UF2-NEXT: [[B1:%.*]] = ptrtoint ptr [[B]] to i64
+; RV64-UF2-NEXT: [[CMP7:%.*]] = icmp sgt i32 [[N]], 0
+; RV64-UF2-NEXT: br i1 [[CMP7]], label %[[FOR_BODY_PREHEADER:.*]], label %[[FOR_COND_CLEANUP:.*]]
+; RV64-UF2: [[FOR_BODY_PREHEADER]]:
+; RV64-UF2-NEXT: [[TMP0:%.*]] = zext i32 [[N]] to i64
+; RV64-UF2-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
+; RV64-UF2-NEXT: [[TMP2:%.*]] = mul nuw i64 [[TMP1]], 8
+; RV64-UF2-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP0]], [[TMP2]]
+; RV64-UF2-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_SCEVCHECK:.*]]
+; RV64-UF2: [[VECTOR_SCEVCHECK]]:
+; RV64-UF2-NEXT: [[TMP3:%.*]] = add nsw i64 [[TMP0]], -1
+; RV64-UF2-NEXT: [[TMP4:%.*]] = add i32 [[N]], -1
+; RV64-UF2-NEXT: [[TMP5:%.*]] = trunc i64 [[TMP3]] to i32
+; RV64-UF2-NEXT: [[MUL:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 1, i32 [[TMP5]])
+; RV64-UF2-NEXT: [[MUL_RESULT:%.*]] = extractvalue { i32, i1 } [[MUL]], 0
+; RV64-UF2-NEXT: [[MUL_OVERFLOW:%.*]] = extractvalue { i32, i1 } [[MUL]], 1
+; RV64-UF2-NEXT: [[TMP6:%.*]] = sub i32 [[TMP4]], [[MUL_RESULT]]
+; RV64-UF2-NEXT: [[TMP7:%.*]] = icmp ugt i32 [[TMP6]], [[TMP4]]
+; RV64-UF2-NEXT: [[TMP8:%.*]] = or i1 [[TMP7]], [[MUL_OVERFLOW]]
+; RV64-UF2-NEXT: [[TMP9:%.*]] = icmp ugt i64 [[TMP3]], 4294967295
+; RV64-UF2-NEXT: [[TMP10:%.*]] = or i1 [[TMP8]], [[TMP9]]
+; RV64-UF2-NEXT: br i1 [[TMP10]], label %[[SCALAR_PH]], label %[[VECTOR_MEMCHECK:.*]]
+; RV64-UF2: [[VECTOR_MEMCHECK]]:
+; RV64-UF2-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64()
+; RV64-UF2-NEXT: [[TMP12:%.*]] = mul nuw i64 [[TMP11]], 4
+; RV64-UF2-NEXT: [[TMP13:%.*]] = mul i64 [[TMP12]], 8
+; RV64-UF2-NEXT: [[TMP14:%.*]] = sub i64 [[B1]], [[A2]]
+; RV64-UF2-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP14]], [[TMP13]]
+; RV64-UF2-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
+; RV64-UF2: [[VECTOR_PH]]:
+; RV64-UF2-NEXT: [[TMP15:%.*]] = call i64 @llvm.vscale.i64()
+; RV64-UF2-NEXT: [[TMP16:%.*]] = mul nuw i64 [[TMP15]], 8
+; RV64-UF2-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP0]], [[TMP16]]
+; RV64-UF2-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP0]], [[N_MOD_VF]]
+; RV64-UF2-NEXT: [[TMP17:%.*]] = call i64 @llvm.vscale.i64()
+; RV64-UF2-NEXT: [[TMP18:%.*]] = mul nuw i64 [[TMP17]], 4
+; RV64-UF2-NEXT: [[TMP19:%.*]] = mul i64 [[TMP18]], 2
+; RV64-UF2-NEXT: [[TMP20:%.*]] = sub i64 [[TMP0]], [[N_VEC]]
+; RV64-UF2-NEXT: [[DOTCAST:%.*]] = trunc i64 [[N_VEC]] to i32
+; RV64-UF2-NEXT: [[TMP21:%.*]] = sub i32 [[N]], [[DOTCAST]]
+; RV64-UF2-NEXT: br label %[[VECTOR_BODY:.*]]
+; RV64-UF2: [[VECTOR_BODY]]:
+; RV64-UF2-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; RV64-UF2-NEXT: [[DOTCAST3:%.*]] = trunc i64 [[INDEX]] to i32
+; RV64-UF2-NEXT: [[OFFSET_IDX:%.*]] = sub i32 [[N]], [[DOTCAST3]]
+; RV64-UF2-NEXT: [[TMP22:%.*]] = add nsw i32 [[OFFSET_IDX]], -1
+; RV64-UF2-NEXT: [[TMP23:%.*]] = zext i32 [[TMP22]] to i64
+; RV64-UF2-NEXT: [[TMP24:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[TMP23]]
+; RV64-UF2-NEXT: [[TMP25:%.*]] = mul i64 0, [[TMP18]]
+; RV64-UF2-NEXT: [[TMP26:%.*]] = sub i64 [[TMP18]], 1
+; RV64-UF2-NEXT: [[TMP27:%.*]] = mul i64 -1, [[TMP26]]
+; RV64-UF2-NEXT: [[TMP28:%.*]] = getelementptr inbounds float, ptr [[TMP24]], i64 [[TMP25]]
+; RV64-UF2-NEXT: [[TMP29:%.*]] = getelementptr inbounds float, ptr [[TMP28]], i64 [[TMP27]]
+; RV64-UF2-NEXT: [[TMP30:%.*]] = mul i64 -1, [[TMP18]]
+; RV64-UF2-NEXT: [[TMP31:%.*]] = sub i64 [[TMP18]], 1
+; RV64-UF2-NEXT: [[TMP32:%.*]] = mul i64 -1, [[TMP31]]
+; RV64-UF2-NEXT: [[TMP33:%.*]] = getelementptr inbounds float, ptr [[TMP24]], i64 [[TMP30]]
+; RV64-UF2-NEXT: [[TMP34:%.*]] = getelementptr inbounds float, ptr [[TMP33]], i64 [[TMP32]]
+; RV64-UF2-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[TMP29]], align 4
+; RV64-UF2-NEXT: [[REVERSE:%.*]] = call <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float> [[WIDE_LOAD]])
+; RV64-UF2-NEXT: [[WIDE_LOAD4:%.*]] = load <vscale x 4 x float>, ptr [[TMP34]], align 4
+; RV64-UF2-NEXT: [[REVERSE5:%.*]] = call <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float> [[WIDE_LOAD4]])
+; RV64-UF2-NEXT: [[TMP35:%.*]] = fadd <vscale x 4 x float> [[REVERSE]], splat (float 1.000000e+00)
+; RV64-UF2-NEXT: [[TMP36:%.*]] = fadd <vscale x 4 x float> [[REVERSE5]], splat (float 1.000000e+00)
+; RV64-UF2-NEXT: [[TMP37:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[TMP23]]
+; RV64-UF2-NEXT: [[TMP38:%.*]] = mul i64 0, [[TMP18]]
+; RV64-UF2-NEXT: [[TMP39:%.*]] = sub i64 [[TMP18]], 1
+; RV64-UF2-NEXT: [[TMP40:%.*]] = mul i64 -1, [[TMP39]]
+; RV64-UF2-NEXT: [[TMP41:%.*]] = getelementptr inbounds float, ptr [[TMP37]], i64 [[TMP38]]
+; RV64-UF2-NEXT: [[TMP42:%.*]] = getelementptr inbounds float, ptr [[TMP41]], i64 [[TMP40]]
+; RV64-UF2-NEXT: [[TMP43:%.*]] = mul i64 -1, [[TMP18]]
+; RV64-UF2-NEXT: [[TMP44:%.*]] = sub i64 [[TMP18]], 1
+; RV64-UF2-NEXT: [[TMP45:%.*]] = mul i64 -1, [[TMP44]]
+; RV64-UF2-NEXT: [[TMP46:%.*]] = getelementptr inbounds float, ptr [[TMP37]], i64 [[TMP43]]
+; RV64-UF2-NEXT: [[TMP47:%.*]] = getelementptr inbounds float, ptr [[TMP46]], i64 [[TMP45]]
+; RV64-UF2-NEXT: [[REVERSE6:%.*]] = call <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float> [[TMP35]])
+; RV64-UF2-NEXT: store <vscale x 4 x float> [[REVERSE6]], ptr [[TMP42]], align 4
+; RV64-UF2-NEXT: [[REVERSE7:%.*]] = call <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float> [[TMP36]])
+; RV64-UF2-NEXT: store <vscale x 4 x float> [[REVERSE7]], ptr [[TMP47]], align 4
+; RV64-UF2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP19]]
+; RV64-UF2-NEXT: [[TMP48:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; RV64-UF2-NEXT: br i1 [[TMP48]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
+; RV64-UF2: [[MIDDLE_BLOCK]]:
+; RV64-UF2-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP0]], [[N_VEC]]
+; RV64-UF2-NEXT: br i1 [[CMP_N]], label %[[FOR_COND_CLEANUP_LOOPEXIT:.*]], label %[[SCALAR_PH]]
+; RV64-UF2: [[SCALAR_PH]]:
+; RV64-UF2-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP20]], %[[MIDDLE_BLOCK]] ], [ [[TMP0]], %[[FOR_BODY_PREHEADER]] ], [ [[TMP0]], %[[VECTOR_SCEVCHECK]] ], [ [[TMP0]], %[[VECTOR_MEMCHECK]] ]
+; RV64-UF2-NEXT: [[BC_RESUME_VAL8:%.*]] = phi i32 [ [[TMP21]], %[[MIDDLE_BLOCK]] ], [ [[N]], %[[FOR_BODY_PREHEADER]] ], [ [[N]], %[[VECTOR_SCEVCHECK]] ], [ [[N]], %[[VECTOR_MEMCHECK]] ]
+; RV64-UF2-NEXT: br label %[[FOR_BODY:.*]]
+; RV64-UF2: [[FOR_COND_CLEANUP_LOOPEXIT]]:
+; RV64-UF2-NEXT: br label %[[FOR_COND_CLEANUP]]
+; RV64-UF2: [[FOR_COND_CLEANUP]]:
+; RV64-UF2-NEXT: ret void
+; RV64-UF2: [[FOR_BODY]]:
;
entry:
  %cmp7 = icmp sgt i32 %n, 0
@@ -834,8 +758,397 @@ for.body: ; preds = %for.body.preheader,
  br i1 %cmp, label %for.body, label %for.cond.cleanup, !llvm.loop !0
}
-!0 = distinct !{!0, !1, !2, !3, !4}
-!1 = !{!"llvm.loop.mustprogress"}
-!2 = !{!"llvm.loop.vectorize.width", i32 4}
-!3 = !{!"llvm.loop.vectorize.scalable.enable", i1 true}
-!4 = !{!"llvm.loop.vectorize.enable", i1 true}
+define void @vector_reverse_f32_simplify(ptr noalias %A, ptr noalias %B) {
+; RV64-LABEL: define void @vector_reverse_f32_simplify(
+; RV64-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]]) #[[ATTR0]] {
+; RV64-NEXT: [[ENTRY:.*]]:
+; RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; RV64-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
+; RV64-NEXT: [[MIN_ITERS_CHECK:%.*]] =
icmp ult i64 1023, [[TMP1]] +; RV64-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; RV64: [[VECTOR_PH]]: +; RV64-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() +; RV64-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4 +; RV64-NEXT: [[N_MOD_VF:%.*]] = urem i64 1023, [[TMP3]] +; RV64-NEXT: [[N_VEC:%.*]] = sub i64 1023, [[N_MOD_VF]] +; RV64-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() +; RV64-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4 +; RV64-NEXT: [[TMP6:%.*]] = sub i64 1023, [[N_VEC]] +; RV64-NEXT: br label %[[VECTOR_BODY:.*]] +; RV64: [[VECTOR_BODY]]: +; RV64-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; RV64-NEXT: [[OFFSET_IDX:%.*]] = sub i64 1023, [[INDEX]] +; RV64-NEXT: [[TMP7:%.*]] = add nsw i64 [[OFFSET_IDX]], -1 +; RV64-NEXT: [[TMP8:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[TMP7]] +; RV64-NEXT: [[TMP9:%.*]] = mul i64 0, [[TMP5]] +; RV64-NEXT: [[TMP10:%.*]] = sub i64 [[TMP5]], 1 +; RV64-NEXT: [[TMP11:%.*]] = mul i64 -1, [[TMP10]] +; RV64-NEXT: [[TMP12:%.*]] = getelementptr inbounds float, ptr [[TMP8]], i64 [[TMP9]] +; RV64-NEXT: [[TMP13:%.*]] = getelementptr inbounds float, ptr [[TMP12]], i64 [[TMP11]] +; RV64-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[TMP13]], align 4 +; RV64-NEXT: [[REVERSE:%.*]] = call <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float> [[WIDE_LOAD]]) +; RV64-NEXT: [[TMP14:%.*]] = fadd <vscale x 4 x float> [[REVERSE]], splat (float 1.000000e+00) +; RV64-NEXT: [[TMP15:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[TMP7]] +; RV64-NEXT: [[TMP16:%.*]] = mul i64 0, [[TMP5]] +; RV64-NEXT: [[TMP17:%.*]] = sub i64 [[TMP5]], 1 +; RV64-NEXT: [[TMP18:%.*]] = mul i64 -1, [[TMP17]] +; RV64-NEXT: [[TMP19:%.*]] = getelementptr inbounds float, ptr [[TMP15]], i64 [[TMP16]] +; RV64-NEXT: [[TMP20:%.*]] = getelementptr inbounds float, ptr [[TMP19]], i64 [[TMP18]] +; RV64-NEXT: [[REVERSE1:%.*]] = call <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float> [[TMP14]]) +; RV64-NEXT: store <vscale x 4 x float> [[REVERSE1]], ptr [[TMP20]], align 4 +; RV64-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] +; RV64-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; RV64-NEXT: br i1 [[TMP21]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] +; RV64: [[MIDDLE_BLOCK]]: +; RV64-NEXT: [[CMP_N:%.*]] = icmp eq i64 1023, [[N_VEC]] +; RV64-NEXT: br i1 [[CMP_N]], [[EXIT:label %.*]], label %[[SCALAR_PH]] +; RV64: [[SCALAR_PH]]: +; RV64-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP6]], %[[MIDDLE_BLOCK]] ], [ 1023, %[[ENTRY]] ] +; RV64-NEXT: br label %[[FOR_BODY:.*]] +; RV64: [[FOR_BODY]]: +; +; RV32-LABEL: define void @vector_reverse_f32_simplify( +; RV32-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]]) #[[ATTR0]] { +; RV32-NEXT: [[ENTRY:.*]]: +; RV32-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() +; RV32-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4 +; RV32-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1023, [[TMP1]] +; RV32-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; RV32: [[VECTOR_PH]]: +; RV32-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() +; RV32-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4 +; RV32-NEXT: [[N_MOD_VF:%.*]] = urem i64 1023, [[TMP3]] +; RV32-NEXT: [[N_VEC:%.*]] = sub i64 1023, [[N_MOD_VF]] +; RV32-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() +; RV32-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4 +; RV32-NEXT: [[TMP6:%.*]] = sub i64 
1023, [[N_VEC]] +; RV32-NEXT: br label %[[VECTOR_BODY:.*]] +; RV32: [[VECTOR_BODY]]: +; RV32-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; RV32-NEXT: [[OFFSET_IDX:%.*]] = sub i64 1023, [[INDEX]] +; RV32-NEXT: [[TMP7:%.*]] = add nsw i64 [[OFFSET_IDX]], -1 +; RV32-NEXT: [[TMP8:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[TMP7]] +; RV32-NEXT: [[TMP9:%.*]] = trunc i64 [[TMP5]] to i32 +; RV32-NEXT: [[TMP10:%.*]] = mul i32 0, [[TMP9]] +; RV32-NEXT: [[TMP11:%.*]] = sub i32 [[TMP9]], 1 +; RV32-NEXT: [[TMP12:%.*]] = mul i32 -1, [[TMP11]] +; RV32-NEXT: [[TMP13:%.*]] = getelementptr inbounds float, ptr [[TMP8]], i32 [[TMP10]] +; RV32-NEXT: [[TMP14:%.*]] = getelementptr inbounds float, ptr [[TMP13]], i32 [[TMP12]] +; RV32-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[TMP14]], align 4 +; RV32-NEXT: [[REVERSE:%.*]] = call <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float> [[WIDE_LOAD]]) +; RV32-NEXT: [[TMP15:%.*]] = fadd <vscale x 4 x float> [[REVERSE]], splat (float 1.000000e+00) +; RV32-NEXT: [[TMP16:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[TMP7]] +; RV32-NEXT: [[TMP17:%.*]] = trunc i64 [[TMP5]] to i32 +; RV32-NEXT: [[TMP18:%.*]] = mul i32 0, [[TMP17]] +; RV32-NEXT: [[TMP19:%.*]] = sub i32 [[TMP17]], 1 +; RV32-NEXT: [[TMP20:%.*]] = mul i32 -1, [[TMP19]] +; RV32-NEXT: [[TMP21:%.*]] = getelementptr inbounds float, ptr [[TMP16]], i32 [[TMP18]] +; RV32-NEXT: [[TMP22:%.*]] = getelementptr inbounds float, ptr [[TMP21]], i32 [[TMP20]] +; RV32-NEXT: [[REVERSE1:%.*]] = call <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float> [[TMP15]]) +; RV32-NEXT: store <vscale x 4 x float> [[REVERSE1]], ptr [[TMP22]], align 4 +; RV32-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] +; RV32-NEXT: [[TMP23:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; RV32-NEXT: br i1 [[TMP23]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] +; RV32: [[MIDDLE_BLOCK]]: +; RV32-NEXT: [[CMP_N:%.*]] = icmp eq i64 1023, [[N_VEC]] +; RV32-NEXT: br i1 [[CMP_N]], [[EXIT:label %.*]], label %[[SCALAR_PH]] +; RV32: [[SCALAR_PH]]: +; RV32-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP6]], %[[MIDDLE_BLOCK]] ], [ 1023, %[[ENTRY]] ] +; RV32-NEXT: br label %[[FOR_BODY:.*]] +; RV32: [[FOR_BODY]]: +; +; RV64-UF2-LABEL: define void @vector_reverse_f32_simplify( +; RV64-UF2-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]]) #[[ATTR0]] { +; RV64-UF2-NEXT: [[ENTRY:.*]]: +; RV64-UF2-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() +; RV64-UF2-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 8 +; RV64-UF2-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1023, [[TMP1]] +; RV64-UF2-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; RV64-UF2: [[VECTOR_PH]]: +; RV64-UF2-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() +; RV64-UF2-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8 +; RV64-UF2-NEXT: [[N_MOD_VF:%.*]] = urem i64 1023, [[TMP3]] +; RV64-UF2-NEXT: [[N_VEC:%.*]] = sub i64 1023, [[N_MOD_VF]] +; RV64-UF2-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() +; RV64-UF2-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4 +; RV64-UF2-NEXT: [[TMP6:%.*]] = mul i64 [[TMP5]], 2 +; RV64-UF2-NEXT: [[TMP7:%.*]] = sub i64 1023, [[N_VEC]] +; RV64-UF2-NEXT: br label %[[VECTOR_BODY:.*]] +; RV64-UF2: [[VECTOR_BODY]]: +; RV64-UF2-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; RV64-UF2-NEXT: [[OFFSET_IDX:%.*]] = sub i64 1023, [[INDEX]] +; RV64-UF2-NEXT: 
[[TMP8:%.*]] = add nsw i64 [[OFFSET_IDX]], -1 +; RV64-UF2-NEXT: [[TMP9:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[TMP8]] +; RV64-UF2-NEXT: [[TMP10:%.*]] = mul i64 0, [[TMP5]] +; RV64-UF2-NEXT: [[TMP11:%.*]] = sub i64 [[TMP5]], 1 +; RV64-UF2-NEXT: [[TMP12:%.*]] = mul i64 -1, [[TMP11]] +; RV64-UF2-NEXT: [[TMP13:%.*]] = getelementptr inbounds float, ptr [[TMP9]], i64 [[TMP10]] +; RV64-UF2-NEXT: [[TMP14:%.*]] = getelementptr inbounds float, ptr [[TMP13]], i64 [[TMP12]] +; RV64-UF2-NEXT: [[TMP15:%.*]] = mul i64 -1, [[TMP5]] +; RV64-UF2-NEXT: [[TMP16:%.*]] = sub i64 [[TMP5]], 1 +; RV64-UF2-NEXT: [[TMP17:%.*]] = mul i64 -1, [[TMP16]] +; RV64-UF2-NEXT: [[TMP18:%.*]] = getelementptr inbounds float, ptr [[TMP9]], i64 [[TMP15]] +; RV64-UF2-NEXT: [[TMP19:%.*]] = getelementptr inbounds float, ptr [[TMP18]], i64 [[TMP17]] +; RV64-UF2-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[TMP14]], align 4 +; RV64-UF2-NEXT: [[REVERSE:%.*]] = call <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float> [[WIDE_LOAD]]) +; RV64-UF2-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 4 x float>, ptr [[TMP19]], align 4 +; RV64-UF2-NEXT: [[REVERSE2:%.*]] = call <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float> [[WIDE_LOAD1]]) +; RV64-UF2-NEXT: [[TMP20:%.*]] = fadd <vscale x 4 x float> [[REVERSE]], splat (float 1.000000e+00) +; RV64-UF2-NEXT: [[TMP21:%.*]] = fadd <vscale x 4 x float> [[REVERSE2]], splat (float 1.000000e+00) +; RV64-UF2-NEXT: [[TMP22:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[TMP8]] +; RV64-UF2-NEXT: [[TMP23:%.*]] = mul i64 0, [[TMP5]] +; RV64-UF2-NEXT: [[TMP24:%.*]] = sub i64 [[TMP5]], 1 +; RV64-UF2-NEXT: [[TMP25:%.*]] = mul i64 -1, [[TMP24]] +; RV64-UF2-NEXT: [[TMP26:%.*]] = getelementptr inbounds float, ptr [[TMP22]], i64 [[TMP23]] +; RV64-UF2-NEXT: [[TMP27:%.*]] = getelementptr inbounds float, ptr [[TMP26]], i64 [[TMP25]] +; RV64-UF2-NEXT: [[TMP28:%.*]] = mul i64 -1, [[TMP5]] +; RV64-UF2-NEXT: [[TMP29:%.*]] = sub i64 [[TMP5]], 1 +; RV64-UF2-NEXT: [[TMP30:%.*]] = mul i64 -1, [[TMP29]] +; RV64-UF2-NEXT: [[TMP31:%.*]] = getelementptr inbounds float, ptr [[TMP22]], i64 [[TMP28]] +; RV64-UF2-NEXT: [[TMP32:%.*]] = getelementptr inbounds float, ptr [[TMP31]], i64 [[TMP30]] +; RV64-UF2-NEXT: [[REVERSE3:%.*]] = call <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float> [[TMP20]]) +; RV64-UF2-NEXT: store <vscale x 4 x float> [[REVERSE3]], ptr [[TMP27]], align 4 +; RV64-UF2-NEXT: [[REVERSE4:%.*]] = call <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float> [[TMP21]]) +; RV64-UF2-NEXT: store <vscale x 4 x float> [[REVERSE4]], ptr [[TMP32]], align 4 +; RV64-UF2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]] +; RV64-UF2-NEXT: [[TMP33:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; RV64-UF2-NEXT: br i1 [[TMP33]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] +; RV64-UF2: [[MIDDLE_BLOCK]]: +; RV64-UF2-NEXT: [[CMP_N:%.*]] = icmp eq i64 1023, [[N_VEC]] +; RV64-UF2-NEXT: br i1 [[CMP_N]], [[EXIT:label %.*]], label %[[SCALAR_PH]] +; RV64-UF2: [[SCALAR_PH]]: +; RV64-UF2-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP7]], %[[MIDDLE_BLOCK]] ], [ 1023, %[[ENTRY]] ] +; RV64-UF2-NEXT: br label %[[FOR_BODY:.*]] +; RV64-UF2: [[FOR_BODY]]: +; +entry: + br label %for.body + +for.body: + %dec.iv = phi i64 [ 1023, %entry ], [ %iv.next, %for.body ] + %iv.next = add nsw i64 %dec.iv, -1 + %arrayidx.b = getelementptr inbounds float, ptr %B, i64 %iv.next + %0 = load float, ptr %arrayidx.b, align 4 + 
%fadd = fadd float %0, 1.000000e+00 + %arrayidx.a = getelementptr inbounds float, ptr %A, i64 %iv.next + store float %fadd, ptr %arrayidx.a, align 4 + %cmp = icmp ugt i64 %dec.iv, 1 + br i1 %cmp, label %for.body, label %exit, !llvm.loop !0 + +exit: + ret void +} + +define void @vector_reverse_irregular_type(ptr noalias %A, ptr noalias %B) { +; RV64-LABEL: define void @vector_reverse_irregular_type( +; RV64-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]]) #[[ATTR0]] { +; RV64-NEXT: [[ENTRY:.*]]: +; RV64-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; RV64: [[VECTOR_PH]]: +; RV64-NEXT: br label %[[VECTOR_BODY:.*]] +; RV64: [[VECTOR_BODY]]: +; RV64-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; RV64-NEXT: [[OFFSET_IDX:%.*]] = sub i64 1023, [[INDEX]] +; RV64-NEXT: [[TMP0:%.*]] = add i64 [[OFFSET_IDX]], 0 +; RV64-NEXT: [[TMP1:%.*]] = add i64 [[OFFSET_IDX]], -1 +; RV64-NEXT: [[TMP2:%.*]] = add i64 [[OFFSET_IDX]], -2 +; RV64-NEXT: [[TMP3:%.*]] = add i64 [[OFFSET_IDX]], -3 +; RV64-NEXT: [[TMP4:%.*]] = add nsw i64 [[TMP0]], -1 +; RV64-NEXT: [[TMP5:%.*]] = add nsw i64 [[TMP1]], -1 +; RV64-NEXT: [[TMP6:%.*]] = add nsw i64 [[TMP2]], -1 +; RV64-NEXT: [[TMP7:%.*]] = add nsw i64 [[TMP3]], -1 +; RV64-NEXT: [[TMP8:%.*]] = getelementptr inbounds i7, ptr [[B]], i64 [[TMP4]] +; RV64-NEXT: [[TMP9:%.*]] = getelementptr inbounds i7, ptr [[B]], i64 [[TMP5]] +; RV64-NEXT: [[TMP10:%.*]] = getelementptr inbounds i7, ptr [[B]], i64 [[TMP6]] +; RV64-NEXT: [[TMP11:%.*]] = getelementptr inbounds i7, ptr [[B]], i64 [[TMP7]] +; RV64-NEXT: [[TMP12:%.*]] = load i7, ptr [[TMP8]], align 1 +; RV64-NEXT: [[TMP13:%.*]] = load i7, ptr [[TMP9]], align 1 +; RV64-NEXT: [[TMP14:%.*]] = load i7, ptr [[TMP10]], align 1 +; RV64-NEXT: [[TMP15:%.*]] = load i7, ptr [[TMP11]], align 1 +; RV64-NEXT: [[TMP16:%.*]] = insertelement <4 x i7> poison, i7 [[TMP12]], i32 0 +; RV64-NEXT: [[TMP17:%.*]] = insertelement <4 x i7> [[TMP16]], i7 [[TMP13]], i32 1 +; RV64-NEXT: [[TMP18:%.*]] = insertelement <4 x i7> [[TMP17]], i7 [[TMP14]], i32 2 +; RV64-NEXT: [[TMP19:%.*]] = insertelement <4 x i7> [[TMP18]], i7 [[TMP15]], i32 3 +; RV64-NEXT: [[TMP20:%.*]] = add <4 x i7> [[TMP19]], splat (i7 1) +; RV64-NEXT: [[TMP21:%.*]] = getelementptr inbounds i7, ptr [[A]], i64 [[TMP4]] +; RV64-NEXT: [[TMP22:%.*]] = getelementptr inbounds i7, ptr [[A]], i64 [[TMP5]] +; RV64-NEXT: [[TMP23:%.*]] = getelementptr inbounds i7, ptr [[A]], i64 [[TMP6]] +; RV64-NEXT: [[TMP24:%.*]] = getelementptr inbounds i7, ptr [[A]], i64 [[TMP7]] +; RV64-NEXT: [[TMP25:%.*]] = extractelement <4 x i7> [[TMP20]], i32 0 +; RV64-NEXT: store i7 [[TMP25]], ptr [[TMP21]], align 1 +; RV64-NEXT: [[TMP26:%.*]] = extractelement <4 x i7> [[TMP20]], i32 1 +; RV64-NEXT: store i7 [[TMP26]], ptr [[TMP22]], align 1 +; RV64-NEXT: [[TMP27:%.*]] = extractelement <4 x i7> [[TMP20]], i32 2 +; RV64-NEXT: store i7 [[TMP27]], ptr [[TMP23]], align 1 +; RV64-NEXT: [[TMP28:%.*]] = extractelement <4 x i7> [[TMP20]], i32 3 +; RV64-NEXT: store i7 [[TMP28]], ptr [[TMP24]], align 1 +; RV64-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 +; RV64-NEXT: [[TMP29:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1020 +; RV64-NEXT: br i1 [[TMP29]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] +; RV64: [[MIDDLE_BLOCK]]: +; RV64-NEXT: br i1 false, [[EXIT:label %.*]], label %[[SCALAR_PH]] +; RV64: [[SCALAR_PH]]: +; RV64-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 3, %[[MIDDLE_BLOCK]] ], [ 1023, %[[ENTRY]] ] +; RV64-NEXT: br label 
%[[FOR_BODY:.*]] +; RV64: [[FOR_BODY]]: +; +; RV32-LABEL: define void @vector_reverse_irregular_type( +; RV32-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]]) #[[ATTR0]] { +; RV32-NEXT: [[ENTRY:.*]]: +; RV32-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; RV32: [[VECTOR_PH]]: +; RV32-NEXT: br label %[[VECTOR_BODY:.*]] +; RV32: [[VECTOR_BODY]]: +; RV32-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; RV32-NEXT: [[OFFSET_IDX:%.*]] = sub i64 1023, [[INDEX]] +; RV32-NEXT: [[TMP0:%.*]] = add i64 [[OFFSET_IDX]], 0 +; RV32-NEXT: [[TMP1:%.*]] = add i64 [[OFFSET_IDX]], -1 +; RV32-NEXT: [[TMP2:%.*]] = add i64 [[OFFSET_IDX]], -2 +; RV32-NEXT: [[TMP3:%.*]] = add i64 [[OFFSET_IDX]], -3 +; RV32-NEXT: [[TMP4:%.*]] = add nsw i64 [[TMP0]], -1 +; RV32-NEXT: [[TMP5:%.*]] = add nsw i64 [[TMP1]], -1 +; RV32-NEXT: [[TMP6:%.*]] = add nsw i64 [[TMP2]], -1 +; RV32-NEXT: [[TMP7:%.*]] = add nsw i64 [[TMP3]], -1 +; RV32-NEXT: [[TMP8:%.*]] = getelementptr inbounds i7, ptr [[B]], i64 [[TMP4]] +; RV32-NEXT: [[TMP9:%.*]] = getelementptr inbounds i7, ptr [[B]], i64 [[TMP5]] +; RV32-NEXT: [[TMP10:%.*]] = getelementptr inbounds i7, ptr [[B]], i64 [[TMP6]] +; RV32-NEXT: [[TMP11:%.*]] = getelementptr inbounds i7, ptr [[B]], i64 [[TMP7]] +; RV32-NEXT: [[TMP12:%.*]] = load i7, ptr [[TMP8]], align 1 +; RV32-NEXT: [[TMP13:%.*]] = load i7, ptr [[TMP9]], align 1 +; RV32-NEXT: [[TMP14:%.*]] = load i7, ptr [[TMP10]], align 1 +; RV32-NEXT: [[TMP15:%.*]] = load i7, ptr [[TMP11]], align 1 +; RV32-NEXT: [[TMP16:%.*]] = insertelement <4 x i7> poison, i7 [[TMP12]], i32 0 +; RV32-NEXT: [[TMP17:%.*]] = insertelement <4 x i7> [[TMP16]], i7 [[TMP13]], i32 1 +; RV32-NEXT: [[TMP18:%.*]] = insertelement <4 x i7> [[TMP17]], i7 [[TMP14]], i32 2 +; RV32-NEXT: [[TMP19:%.*]] = insertelement <4 x i7> [[TMP18]], i7 [[TMP15]], i32 3 +; RV32-NEXT: [[TMP20:%.*]] = add <4 x i7> [[TMP19]], splat (i7 1) +; RV32-NEXT: [[TMP21:%.*]] = getelementptr inbounds i7, ptr [[A]], i64 [[TMP4]] +; RV32-NEXT: [[TMP22:%.*]] = getelementptr inbounds i7, ptr [[A]], i64 [[TMP5]] +; RV32-NEXT: [[TMP23:%.*]] = getelementptr inbounds i7, ptr [[A]], i64 [[TMP6]] +; RV32-NEXT: [[TMP24:%.*]] = getelementptr inbounds i7, ptr [[A]], i64 [[TMP7]] +; RV32-NEXT: [[TMP25:%.*]] = extractelement <4 x i7> [[TMP20]], i32 0 +; RV32-NEXT: store i7 [[TMP25]], ptr [[TMP21]], align 1 +; RV32-NEXT: [[TMP26:%.*]] = extractelement <4 x i7> [[TMP20]], i32 1 +; RV32-NEXT: store i7 [[TMP26]], ptr [[TMP22]], align 1 +; RV32-NEXT: [[TMP27:%.*]] = extractelement <4 x i7> [[TMP20]], i32 2 +; RV32-NEXT: store i7 [[TMP27]], ptr [[TMP23]], align 1 +; RV32-NEXT: [[TMP28:%.*]] = extractelement <4 x i7> [[TMP20]], i32 3 +; RV32-NEXT: store i7 [[TMP28]], ptr [[TMP24]], align 1 +; RV32-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 +; RV32-NEXT: [[TMP29:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1020 +; RV32-NEXT: br i1 [[TMP29]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] +; RV32: [[MIDDLE_BLOCK]]: +; RV32-NEXT: br i1 false, [[EXIT:label %.*]], label %[[SCALAR_PH]] +; RV32: [[SCALAR_PH]]: +; RV32-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 3, %[[MIDDLE_BLOCK]] ], [ 1023, %[[ENTRY]] ] +; RV32-NEXT: br label %[[FOR_BODY:.*]] +; RV32: [[FOR_BODY]]: +; +; RV64-UF2-LABEL: define void @vector_reverse_irregular_type( +; RV64-UF2-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]]) #[[ATTR0]] { +; RV64-UF2-NEXT: [[ENTRY:.*]]: +; RV64-UF2-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; RV64-UF2: 
[[VECTOR_PH]]: +; RV64-UF2-NEXT: br label %[[VECTOR_BODY:.*]] +; RV64-UF2: [[VECTOR_BODY]]: +; RV64-UF2-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; RV64-UF2-NEXT: [[OFFSET_IDX:%.*]] = sub i64 1023, [[INDEX]] +; RV64-UF2-NEXT: [[TMP0:%.*]] = add i64 [[OFFSET_IDX]], 0 +; RV64-UF2-NEXT: [[TMP1:%.*]] = add i64 [[OFFSET_IDX]], -1 +; RV64-UF2-NEXT: [[TMP2:%.*]] = add i64 [[OFFSET_IDX]], -2 +; RV64-UF2-NEXT: [[TMP3:%.*]] = add i64 [[OFFSET_IDX]], -3 +; RV64-UF2-NEXT: [[TMP4:%.*]] = add i64 [[OFFSET_IDX]], -4 +; RV64-UF2-NEXT: [[TMP5:%.*]] = add i64 [[OFFSET_IDX]], -5 +; RV64-UF2-NEXT: [[TMP6:%.*]] = add i64 [[OFFSET_IDX]], -6 +; RV64-UF2-NEXT: [[TMP7:%.*]] = add i64 [[OFFSET_IDX]], -7 +; RV64-UF2-NEXT: [[TMP8:%.*]] = add nsw i64 [[TMP0]], -1 +; RV64-UF2-NEXT: [[TMP9:%.*]] = add nsw i64 [[TMP1]], -1 +; RV64-UF2-NEXT: [[TMP10:%.*]] = add nsw i64 [[TMP2]], -1 +; RV64-UF2-NEXT: [[TMP11:%.*]] = add nsw i64 [[TMP3]], -1 +; RV64-UF2-NEXT: [[TMP12:%.*]] = add nsw i64 [[TMP4]], -1 +; RV64-UF2-NEXT: [[TMP13:%.*]] = add nsw i64 [[TMP5]], -1 +; RV64-UF2-NEXT: [[TMP14:%.*]] = add nsw i64 [[TMP6]], -1 +; RV64-UF2-NEXT: [[TMP15:%.*]] = add nsw i64 [[TMP7]], -1 +; RV64-UF2-NEXT: [[TMP16:%.*]] = getelementptr inbounds i7, ptr [[B]], i64 [[TMP8]] +; RV64-UF2-NEXT: [[TMP17:%.*]] = getelementptr inbounds i7, ptr [[B]], i64 [[TMP9]] +; RV64-UF2-NEXT: [[TMP18:%.*]] = getelementptr inbounds i7, ptr [[B]], i64 [[TMP10]] +; RV64-UF2-NEXT: [[TMP19:%.*]] = getelementptr inbounds i7, ptr [[B]], i64 [[TMP11]] +; RV64-UF2-NEXT: [[TMP20:%.*]] = getelementptr inbounds i7, ptr [[B]], i64 [[TMP12]] +; RV64-UF2-NEXT: [[TMP21:%.*]] = getelementptr inbounds i7, ptr [[B]], i64 [[TMP13]] +; RV64-UF2-NEXT: [[TMP22:%.*]] = getelementptr inbounds i7, ptr [[B]], i64 [[TMP14]] +; RV64-UF2-NEXT: [[TMP23:%.*]] = getelementptr inbounds i7, ptr [[B]], i64 [[TMP15]] +; RV64-UF2-NEXT: [[TMP24:%.*]] = load i7, ptr [[TMP16]], align 1 +; RV64-UF2-NEXT: [[TMP25:%.*]] = load i7, ptr [[TMP17]], align 1 +; RV64-UF2-NEXT: [[TMP26:%.*]] = load i7, ptr [[TMP18]], align 1 +; RV64-UF2-NEXT: [[TMP27:%.*]] = load i7, ptr [[TMP19]], align 1 +; RV64-UF2-NEXT: [[TMP28:%.*]] = insertelement <4 x i7> poison, i7 [[TMP24]], i32 0 +; RV64-UF2-NEXT: [[TMP29:%.*]] = insertelement <4 x i7> [[TMP28]], i7 [[TMP25]], i32 1 +; RV64-UF2-NEXT: [[TMP30:%.*]] = insertelement <4 x i7> [[TMP29]], i7 [[TMP26]], i32 2 +; RV64-UF2-NEXT: [[TMP31:%.*]] = insertelement <4 x i7> [[TMP30]], i7 [[TMP27]], i32 3 +; RV64-UF2-NEXT: [[TMP32:%.*]] = load i7, ptr [[TMP20]], align 1 +; RV64-UF2-NEXT: [[TMP33:%.*]] = load i7, ptr [[TMP21]], align 1 +; RV64-UF2-NEXT: [[TMP34:%.*]] = load i7, ptr [[TMP22]], align 1 +; RV64-UF2-NEXT: [[TMP35:%.*]] = load i7, ptr [[TMP23]], align 1 +; RV64-UF2-NEXT: [[TMP36:%.*]] = insertelement <4 x i7> poison, i7 [[TMP32]], i32 0 +; RV64-UF2-NEXT: [[TMP37:%.*]] = insertelement <4 x i7> [[TMP36]], i7 [[TMP33]], i32 1 +; RV64-UF2-NEXT: [[TMP38:%.*]] = insertelement <4 x i7> [[TMP37]], i7 [[TMP34]], i32 2 +; RV64-UF2-NEXT: [[TMP39:%.*]] = insertelement <4 x i7> [[TMP38]], i7 [[TMP35]], i32 3 +; RV64-UF2-NEXT: [[TMP40:%.*]] = add <4 x i7> [[TMP31]], splat (i7 1) +; RV64-UF2-NEXT: [[TMP41:%.*]] = add <4 x i7> [[TMP39]], splat (i7 1) +; RV64-UF2-NEXT: [[TMP42:%.*]] = getelementptr inbounds i7, ptr [[A]], i64 [[TMP8]] +; RV64-UF2-NEXT: [[TMP43:%.*]] = getelementptr inbounds i7, ptr [[A]], i64 [[TMP9]] +; RV64-UF2-NEXT: [[TMP44:%.*]] = getelementptr inbounds i7, ptr [[A]], i64 [[TMP10]] +; RV64-UF2-NEXT: [[TMP45:%.*]] = 
getelementptr inbounds i7, ptr [[A]], i64 [[TMP11]] +; RV64-UF2-NEXT: [[TMP46:%.*]] = getelementptr inbounds i7, ptr [[A]], i64 [[TMP12]] +; RV64-UF2-NEXT: [[TMP47:%.*]] = getelementptr inbounds i7, ptr [[A]], i64 [[TMP13]] +; RV64-UF2-NEXT: [[TMP48:%.*]] = getelementptr inbounds i7, ptr [[A]], i64 [[TMP14]] +; RV64-UF2-NEXT: [[TMP49:%.*]] = getelementptr inbounds i7, ptr [[A]], i64 [[TMP15]] +; RV64-UF2-NEXT: [[TMP50:%.*]] = extractelement <4 x i7> [[TMP40]], i32 0 +; RV64-UF2-NEXT: store i7 [[TMP50]], ptr [[TMP42]], align 1 +; RV64-UF2-NEXT: [[TMP51:%.*]] = extractelement <4 x i7> [[TMP40]], i32 1 +; RV64-UF2-NEXT: store i7 [[TMP51]], ptr [[TMP43]], align 1 +; RV64-UF2-NEXT: [[TMP52:%.*]] = extractelement <4 x i7> [[TMP40]], i32 2 +; RV64-UF2-NEXT: store i7 [[TMP52]], ptr [[TMP44]], align 1 +; RV64-UF2-NEXT: [[TMP53:%.*]] = extractelement <4 x i7> [[TMP40]], i32 3 +; RV64-UF2-NEXT: store i7 [[TMP53]], ptr [[TMP45]], align 1 +; RV64-UF2-NEXT: [[TMP54:%.*]] = extractelement <4 x i7> [[TMP41]], i32 0 +; RV64-UF2-NEXT: store i7 [[TMP54]], ptr [[TMP46]], align 1 +; RV64-UF2-NEXT: [[TMP55:%.*]] = extractelement <4 x i7> [[TMP41]], i32 1 +; RV64-UF2-NEXT: store i7 [[TMP55]], ptr [[TMP47]], align 1 +; RV64-UF2-NEXT: [[TMP56:%.*]] = extractelement <4 x i7> [[TMP41]], i32 2 +; RV64-UF2-NEXT: store i7 [[TMP56]], ptr [[TMP48]], align 1 +; RV64-UF2-NEXT: [[TMP57:%.*]] = extractelement <4 x i7> [[TMP41]], i32 3 +; RV64-UF2-NEXT: store i7 [[TMP57]], ptr [[TMP49]], align 1 +; RV64-UF2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8 +; RV64-UF2-NEXT: [[TMP58:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1016 +; RV64-UF2-NEXT: br i1 [[TMP58]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] +; RV64-UF2: [[MIDDLE_BLOCK]]: +; RV64-UF2-NEXT: br i1 false, [[EXIT:label %.*]], label %[[SCALAR_PH]] +; RV64-UF2: [[SCALAR_PH]]: +; RV64-UF2-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 7, %[[MIDDLE_BLOCK]] ], [ 1023, %[[ENTRY]] ] +; RV64-UF2-NEXT: br label %[[FOR_BODY:.*]] +; RV64-UF2: [[FOR_BODY]]: +; +entry: + br label %for.body + +for.body: + %dec.iv = phi i64 [ 1023, %entry ], [ %iv.next, %for.body ] + %iv.next = add nsw i64 %dec.iv, -1 + %arrayidx.b = getelementptr inbounds i7, ptr %B, i64 %iv.next + %0 = load i7, ptr %arrayidx.b, align 1 + %add = add i7 %0, 1 + %arrayidx.a = getelementptr inbounds i7, ptr %A, i64 %iv.next + store i7 %add, ptr %arrayidx.a, align 1 + %cmp = icmp ugt i64 %dec.iv, 1 + br i1 %cmp, label %for.body, label %exit, !llvm.loop !4 + +exit: + ret void +} + +!0 = distinct !{!0, !1, !2, !3} +!1 = !{!"llvm.loop.vectorize.width", i32 4} +!2 = !{!"llvm.loop.vectorize.scalable.enable", i1 true} +!3 = !{!"llvm.loop.vectorize.enable", i1 true} +!4 = distinct !{!4, !1, !3} diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/scalable-tailfold.ll b/llvm/test/Transforms/LoopVectorize/RISCV/scalable-tailfold.ll index ff9c585..b046f61 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/scalable-tailfold.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/scalable-tailfold.ll @@ -24,12 +24,16 @@ define void @vector_add(ptr noalias nocapture %a, i64 %v, i64 %n) { ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64 [[INDEX]], i64 1025) -; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDEX]] +; CHECK-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 
0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[AVL:%.*]] = sub i64 1025, [[EVL_BASED_IV]] +; CHECK-NEXT: [[TMP7:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true) +; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[EVL_BASED_IV]] ; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds i64, ptr [[TMP8]], i32 0 -; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 2 x i64> @llvm.masked.load.nxv2i64.p0(ptr [[TMP9]], i32 8, <vscale x 2 x i1> [[ACTIVE_LANE_MASK]], <vscale x 2 x i64> poison) +; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 2 x i64> @llvm.vp.load.nxv2i64.p0(ptr align 8 [[TMP9]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP7]]) ; CHECK-NEXT: [[TMP10:%.*]] = add <vscale x 2 x i64> [[WIDE_MASKED_LOAD]], [[BROADCAST_SPLAT]] -; CHECK-NEXT: call void @llvm.masked.store.nxv2i64.p0(<vscale x 2 x i64> [[TMP10]], ptr [[TMP9]], i32 8, <vscale x 2 x i1> [[ACTIVE_LANE_MASK]]) +; CHECK-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[TMP10]], ptr align 8 [[TMP9]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP7]]) +; CHECK-NEXT: [[TMP12:%.*]] = zext i32 [[TMP7]] to i64 +; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP12]], [[EVL_BASED_IV]] ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]] ; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] @@ -46,7 +50,7 @@ define void @vector_add(ptr noalias nocapture %a, i64 %v, i64 %n) { ; CHECK-NEXT: store i64 [[ADD]], ptr [[ARRAYIDX]], align 8 ; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 ; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1025 -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: for.end: ; CHECK-NEXT: ret void ; @@ -87,15 +91,19 @@ define void @indexed_store(ptr noalias nocapture %a, ptr noalias nocapture %b, i ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64 [[INDEX]], i64 1025) -; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i64, ptr [[B:%.*]], i64 [[INDEX]] +; CHECK-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[AVL:%.*]] = sub i64 1025, [[EVL_BASED_IV]] +; CHECK-NEXT: [[TMP7:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true) +; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i64, ptr [[B:%.*]], i64 [[EVL_BASED_IV]] ; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds i64, ptr [[TMP8]], i32 0 -; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 2 x i64> @llvm.masked.load.nxv2i64.p0(ptr [[TMP9]], i32 8, <vscale x 2 x i1> [[ACTIVE_LANE_MASK]], <vscale x 2 x i64> poison) +; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 2 x i64> @llvm.vp.load.nxv2i64.p0(ptr align 8 [[TMP9]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP7]]) ; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], <vscale x 2 x i64> [[WIDE_MASKED_LOAD]] -; CHECK-NEXT: call void @llvm.masked.scatter.nxv2i64.nxv2p0(<vscale x 2 x i64> [[BROADCAST_SPLAT]], <vscale x 2 x ptr> 
[[TMP10]], i32 8, <vscale x 2 x i1> [[ACTIVE_LANE_MASK]]) +; CHECK-NEXT: call void @llvm.vp.scatter.nxv2i64.nxv2p0(<vscale x 2 x i64> [[BROADCAST_SPLAT]], <vscale x 2 x ptr> align 8 [[TMP10]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP7]]) +; CHECK-NEXT: [[TMP12:%.*]] = zext i32 [[TMP7]] to i64 +; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP12]], [[EVL_BASED_IV]] ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]] ; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[FOR_END:%.*]] ; CHECK: scalar.ph: @@ -109,7 +117,7 @@ define void @indexed_store(ptr noalias nocapture %a, ptr noalias nocapture %b, i ; CHECK-NEXT: store i64 [[V]], ptr [[AADDR]], align 8 ; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 ; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1025 -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; CHECK: for.end: ; CHECK-NEXT: ret void ; @@ -146,20 +154,24 @@ define i64 @indexed_load(ptr noalias nocapture %a, ptr noalias nocapture %b, i64 ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 2 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP11:%.*]], [[VECTOR_BODY]] ] -; CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64 [[INDEX]], i64 1025) -; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i64, ptr [[B:%.*]], i64 [[INDEX]] +; CHECK-NEXT: [[AVL:%.*]] = sub i64 1025, [[EVL_BASED_IV]] +; CHECK-NEXT: [[TMP7:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true) +; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i64, ptr [[B:%.*]], i64 [[EVL_BASED_IV]] ; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds i64, ptr [[TMP8]], i32 0 -; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 2 x i64> @llvm.masked.load.nxv2i64.p0(ptr [[TMP9]], i32 8, <vscale x 2 x i1> [[ACTIVE_LANE_MASK]], <vscale x 2 x i64> poison) +; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 2 x i64> @llvm.vp.load.nxv2i64.p0(ptr align 8 [[TMP9]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP7]]) ; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], <vscale x 2 x i64> [[WIDE_MASKED_LOAD]] -; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 2 x i64> @llvm.masked.gather.nxv2i64.nxv2p0(<vscale x 2 x ptr> [[TMP10]], i32 8, <vscale x 2 x i1> [[ACTIVE_LANE_MASK]], <vscale x 2 x i64> poison) -; CHECK-NEXT: [[TMP11]] = add <vscale x 2 x i64> [[VEC_PHI]], [[WIDE_MASKED_GATHER]] -; CHECK-NEXT: [[TMP12:%.*]] = select <vscale x 2 x i1> [[ACTIVE_LANE_MASK]], <vscale x 2 x i64> [[TMP11]], <vscale x 2 x i64> [[VEC_PHI]] +; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 2 x i64> @llvm.vp.gather.nxv2i64.nxv2p0(<vscale x 2 x ptr> align 8 [[TMP10]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP7]]) +; CHECK-NEXT: [[TMP12:%.*]] = add <vscale x 2 x i64> [[VEC_PHI]], 
[[WIDE_MASKED_GATHER]] +; CHECK-NEXT: [[TMP11]] = call <vscale x 2 x i64> @llvm.vp.merge.nxv2i64(<vscale x 2 x i1> splat (i1 true), <vscale x 2 x i64> [[TMP12]], <vscale x 2 x i64> [[VEC_PHI]], i32 [[TMP7]]) +; CHECK-NEXT: [[TMP15:%.*]] = zext i32 [[TMP7]] to i64 +; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP15]], [[EVL_BASED_IV]] ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]] ; CHECK-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: [[TMP14:%.*]] = call i64 @llvm.vector.reduce.add.nxv2i64(<vscale x 2 x i64> [[TMP12]]) +; CHECK-NEXT: [[TMP14:%.*]] = call i64 @llvm.vector.reduce.add.nxv2i64(<vscale x 2 x i64> [[TMP11]]) ; CHECK-NEXT: br label [[FOR_END:%.*]] ; CHECK: scalar.ph: ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ] @@ -175,7 +187,7 @@ define i64 @indexed_load(ptr noalias nocapture %a, ptr noalias nocapture %b, i64 ; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 ; CHECK-NEXT: [[SUM_NEXT]] = add i64 [[SUM]], [[ELEM]] ; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1025 -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; CHECK: for.end: ; CHECK-NEXT: [[SUM_NEXT_LCSSA:%.*]] = phi i64 [ [[SUM_NEXT]], [[FOR_BODY]] ], [ [[TMP14]], [[MIDDLE_BLOCK]] ] ; CHECK-NEXT: ret i64 [[SUM_NEXT_LCSSA]] @@ -217,13 +229,17 @@ define void @splat_int(ptr noalias nocapture %a, i64 %v, i64 %n) { ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64 [[INDEX]], i64 1025) -; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDEX]] +; CHECK-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[AVL:%.*]] = sub i64 1025, [[EVL_BASED_IV]] +; CHECK-NEXT: [[TMP7:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true) +; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[EVL_BASED_IV]] ; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds i64, ptr [[TMP8]], i32 0 -; CHECK-NEXT: call void @llvm.masked.store.nxv2i64.p0(<vscale x 2 x i64> [[BROADCAST_SPLAT]], ptr [[TMP9]], i32 8, <vscale x 2 x i1> [[ACTIVE_LANE_MASK]]) +; CHECK-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[BROADCAST_SPLAT]], ptr align 8 [[TMP9]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP7]]) +; CHECK-NEXT: [[TMP11:%.*]] = zext i32 [[TMP7]] to i64 +; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP11]], [[EVL_BASED_IV]] ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]] ; CHECK-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[FOR_END:%.*]] ; CHECK: scalar.ph: @@ -235,7 +251,7 @@ define void 
@splat_int(ptr noalias nocapture %a, i64 %v, i64 %n) { ; CHECK-NEXT: store i64 [[V]], ptr [[ARRAYIDX]], align 8 ; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 ; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1025 -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] ; CHECK: for.end: ; CHECK-NEXT: ret void ; @@ -272,14 +288,18 @@ define void @uniform_store(ptr noalias nocapture %a, ptr noalias nocapture %b, i ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64 [[INDEX]], i64 1025) +; CHECK-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[AVL:%.*]] = sub i64 1025, [[EVL_BASED_IV]] +; CHECK-NEXT: [[TMP7:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true) ; CHECK-NEXT: store i64 [[V]], ptr [[B:%.*]], align 8 -; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDEX]] +; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[EVL_BASED_IV]] ; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds i64, ptr [[TMP8]], i32 0 -; CHECK-NEXT: call void @llvm.masked.store.nxv2i64.p0(<vscale x 2 x i64> [[BROADCAST_SPLAT]], ptr [[TMP9]], i32 8, <vscale x 2 x i1> [[ACTIVE_LANE_MASK]]) +; CHECK-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[BROADCAST_SPLAT]], ptr align 8 [[TMP9]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP7]]) +; CHECK-NEXT: [[TMP11:%.*]] = zext i32 [[TMP7]] to i64 +; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP11]], [[EVL_BASED_IV]] ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]] ; CHECK-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[FOR_END:%.*]] ; CHECK: scalar.ph: @@ -292,7 +312,7 @@ define void @uniform_store(ptr noalias nocapture %a, ptr noalias nocapture %b, i ; CHECK-NEXT: store i64 [[V]], ptr [[ARRAYIDX]], align 8 ; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 ; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1025 -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] ; CHECK: for.end: ; CHECK-NEXT: ret void ; @@ -363,15 +383,19 @@ define void @vector_add_trip1024(ptr noalias nocapture %a, i64 %v, i64 %n) { ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64 [[INDEX]], i64 1024) -; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDEX]] +; CHECK-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[AVL:%.*]] = sub i64 1024, 
[[EVL_BASED_IV]] +; CHECK-NEXT: [[TMP7:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true) +; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[EVL_BASED_IV]] ; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds i64, ptr [[TMP8]], i32 0 -; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 2 x i64> @llvm.masked.load.nxv2i64.p0(ptr [[TMP9]], i32 8, <vscale x 2 x i1> [[ACTIVE_LANE_MASK]], <vscale x 2 x i64> poison) +; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 2 x i64> @llvm.vp.load.nxv2i64.p0(ptr align 8 [[TMP9]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP7]]) ; CHECK-NEXT: [[TMP10:%.*]] = add <vscale x 2 x i64> [[WIDE_MASKED_LOAD]], [[BROADCAST_SPLAT]] -; CHECK-NEXT: call void @llvm.masked.store.nxv2i64.p0(<vscale x 2 x i64> [[TMP10]], ptr [[TMP9]], i32 8, <vscale x 2 x i1> [[ACTIVE_LANE_MASK]]) +; CHECK-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[TMP10]], ptr align 8 [[TMP9]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP7]]) +; CHECK-NEXT: [[TMP12:%.*]] = zext i32 [[TMP7]] to i64 +; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP12]], [[EVL_BASED_IV]] ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]] ; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[FOR_END:%.*]] ; CHECK: scalar.ph: @@ -385,7 +409,7 @@ define void @vector_add_trip1024(ptr noalias nocapture %a, i64 %v, i64 %n) { ; CHECK-NEXT: store i64 [[ADD]], ptr [[ARRAYIDX]], align 8 ; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 ; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] ; CHECK: for.end: ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cost.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cost.ll index b4afdd7..cd53ea0 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cost.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cost.ll @@ -1,17 +1,17 @@ ; REQUIRES: asserts -; RUN: opt < %s -passes=loop-vectorize -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue \ -; RUN: -mtriple riscv64-linux-gnu -mattr=+v,+f -S -disable-output -debug-only=loop-vectorize 2>&1 | FileCheck %s +; RUN: opt < %s -passes=loop-vectorize -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue -force-tail-folding-style=data \ +; RUN: -mtriple riscv64-linux-gnu -mattr=+v,+f -S -disable-output -debug-only=loop-vectorize 2>&1 | FileCheck %s --check-prefix=DATA ; RUN: opt < %s -passes=loop-vectorize -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue \ ; RUN: -mtriple riscv64-linux-gnu -force-tail-folding-style=data-with-evl -mattr=+v,+f -S \ ; RUN: -disable-output -debug-only=loop-vectorize 2>&1 | FileCheck %s --check-prefix=EVL -; CHECK: Cost of 2 for VF 2: EMIT{{.*}} = active lane mask -; CHECK: Cost of 4 for VF 4: EMIT{{.*}} = active lane mask -; CHECK: Cost of 8 for VF 8: EMIT{{.*}} = active lane mask -; CHECK: Cost of 2 for VF vscale x 1: EMIT{{.*}} = active lane mask -; CHECK: Cost of 4 for VF vscale x 2: 
EMIT{{.*}} = active lane mask -; CHECK: Cost of 8 for VF vscale x 4: EMIT{{.*}} = active lane mask +; DATA: Cost of 2 for VF 2: EMIT{{.*}} = active lane mask +; DATA: Cost of 4 for VF 4: EMIT{{.*}} = active lane mask +; DATA: Cost of 8 for VF 8: EMIT{{.*}} = active lane mask +; DATA: Cost of 2 for VF vscale x 1: EMIT{{.*}} = active lane mask +; DATA: Cost of 4 for VF vscale x 2: EMIT{{.*}} = active lane mask +; DATA: Cost of 8 for VF vscale x 4: EMIT{{.*}} = active lane mask ; EVL: Cost of 1 for VF vscale x 1: EMIT{{.*}} = EXPLICIT-VECTOR-LENGTH ; EVL: Cost of 1 for VF vscale x 2: EMIT{{.*}} = EXPLICIT-VECTOR-LENGTH diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/truncate-to-minimal-bitwidth-cost.ll b/llvm/test/Transforms/LoopVectorize/RISCV/truncate-to-minimal-bitwidth-cost.ll index 528cec0..b56e712 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/truncate-to-minimal-bitwidth-cost.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/truncate-to-minimal-bitwidth-cost.ll @@ -170,15 +170,11 @@ define void @truncate_to_i1_used_by_branch(i8 %x, ptr %dst) #0 { ; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] ; CHECK: [[VECTOR_BODY]]: ; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] -; CHECK-NEXT: [[BROADCAST_SPLATINSERT3:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[INDEX]], i64 0 -; CHECK-NEXT: [[BROADCAST_SPLAT4:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT3]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer -; CHECK-NEXT: [[TMP13:%.*]] = call <vscale x 4 x i32> @llvm.stepvector.nxv4i32() -; CHECK-NEXT: [[TMP14:%.*]] = add <vscale x 4 x i32> zeroinitializer, [[TMP13]] -; CHECK-NEXT: [[VEC_IV:%.*]] = add <vscale x 4 x i32> [[BROADCAST_SPLAT4]], [[TMP14]] -; CHECK-NEXT: [[TMP15:%.*]] = extractelement <vscale x 4 x i32> [[VEC_IV]], i32 0 -; CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i32(i32 [[TMP15]], i32 9) -; CHECK-NEXT: [[TMP11:%.*]] = select <vscale x 4 x i1> [[ACTIVE_LANE_MASK]], <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i1> zeroinitializer -; CHECK-NEXT: call void @llvm.masked.scatter.nxv4i8.nxv4p0(<vscale x 4 x i8> zeroinitializer, <vscale x 4 x ptr> [[BROADCAST_SPLAT2]], i32 1, <vscale x 4 x i1> [[TMP11]]) +; CHECK-NEXT: [[EVL_BASED_IV:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[AVL:%.*]] = sub i32 9, [[EVL_BASED_IV]] +; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.experimental.get.vector.length.i32(i32 [[AVL]], i32 4, i1 true) +; CHECK-NEXT: call void @llvm.vp.scatter.nxv4i8.nxv4p0(<vscale x 4 x i8> zeroinitializer, <vscale x 4 x ptr> align 1 [[BROADCAST_SPLAT2]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP6]]) +; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add nuw i32 [[TMP6]], [[EVL_BASED_IV]] ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP5]] ; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] @@ -199,7 +195,7 @@ define void @truncate_to_i1_used_by_branch(i8 %x, ptr %dst) #0 { ; CHECK-NEXT: [[ADD]] = add i8 [[F_039]], 1 ; CHECK-NEXT: [[CONV:%.*]] = sext i8 [[F_039]] to i32 ; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[CONV]], 8 -; CHECK-NEXT: br i1 [[CMP]], label %[[LOOP_HEADER]], label %[[EXIT]], !llvm.loop [[LOOP7:![0-9]+]] +; CHECK-NEXT: br i1 [[CMP]], label %[[LOOP_HEADER]], label %[[EXIT]], !llvm.loop [[LOOP8:![0-9]+]] ; CHECK: [[EXIT]]: ; 
CHECK-NEXT: ret void ; @@ -298,7 +294,7 @@ define void @icmp_only_first_op_truncated(ptr noalias %dst, i32 %x, i64 %N, i64 ; CHECK-NEXT: call void @llvm.masked.scatter.nxv2f64.nxv2p0(<vscale x 2 x double> [[WIDE_MASKED_GATHER]], <vscale x 2 x ptr> [[BROADCAST_SPLAT6]], i32 8, <vscale x 2 x i1> [[TMP8]]) ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]] ; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP11]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP11]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP0]], [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]] @@ -319,7 +315,7 @@ define void @icmp_only_first_op_truncated(ptr noalias %dst, i32 %x, i64 %N, i64 ; CHECK: [[LOOP_LATCH]]: ; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 ; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV]], [[V]] -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP_HEADER]], !llvm.loop [[LOOP9:![0-9]+]] +; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP_HEADER]], !llvm.loop [[LOOP10:![0-9]+]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; @@ -359,8 +355,9 @@ attributes #1 = { "target-features"="+64bit,+v" } ; CHECK: [[LOOP3]] = distinct !{[[LOOP3]], [[META2]], [[META1]]} ; CHECK: [[LOOP4]] = distinct !{[[LOOP4]], [[META1]], [[META2]]} ; CHECK: [[LOOP5]] = distinct !{[[LOOP5]], [[META2]], [[META1]]} -; CHECK: [[LOOP6]] = distinct !{[[LOOP6]], [[META1]], [[META2]]} -; CHECK: [[LOOP7]] = distinct !{[[LOOP7]], [[META2]], [[META1]]} -; CHECK: [[LOOP8]] = distinct !{[[LOOP8]], [[META1]], [[META2]]} -; CHECK: [[LOOP9]] = distinct !{[[LOOP9]], [[META2]], [[META1]]} +; CHECK: [[LOOP6]] = distinct !{[[LOOP6]], [[META1]], [[META7:![0-9]+]], [[META2]]} +; CHECK: [[META7]] = !{!"llvm.loop.isvectorized.tailfoldingstyle", !"evl"} +; CHECK: [[LOOP8]] = distinct !{[[LOOP8]], [[META2]], [[META1]]} +; CHECK: [[LOOP9]] = distinct !{[[LOOP9]], [[META1]], [[META2]]} +; CHECK: [[LOOP10]] = distinct !{[[LOOP10]], [[META2]], [[META1]]} ;. 
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/uniform-load-store.ll b/llvm/test/Transforms/LoopVectorize/RISCV/uniform-load-store.ll index 8baf9d9..c6955f1 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/uniform-load-store.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/uniform-load-store.ll @@ -2,9 +2,6 @@ ; RUN: opt < %s -passes=loop-vectorize -scalable-vectorization=on -riscv-v-vector-bits-min=0 -mtriple riscv64-linux-gnu -mattr=+v,+f -S 2>%t | FileCheck %s -check-prefix=SCALABLE ; RUN: opt < %s -passes=loop-vectorize -scalable-vectorization=off -mtriple riscv64-linux-gnu -mattr=+v,+f -S 2>%t | FileCheck %s -check-prefix=FIXEDLEN ; RUN: opt < %s -passes=loop-vectorize -scalable-vectorization=on -riscv-v-vector-bits-min=0 -prefer-predicate-over-epilogue=predicate-dont-vectorize -mtriple riscv64-linux-gnu -mattr=+v,+f -S 2>%t | FileCheck %s -check-prefix=TF-SCALABLE -; RUN: opt < %s -passes=loop-vectorize -scalable-vectorization=off -prefer-predicate-over-epilogue=predicate-dont-vectorize -mtriple riscv64-linux-gnu -mattr=+v,+f -S 2>%t | FileCheck %s -check-prefix=TF-FIXEDLEN - - target datalayout = "e-m:e-p:64:64-i64:64-i128:128-n64-S128" target triple = "riscv64" @@ -103,15 +100,19 @@ define void @uniform_load(ptr noalias nocapture %a, ptr noalias nocapture %b, i6 ; TF-SCALABLE-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 2 ; TF-SCALABLE-NEXT: br label %[[VECTOR_BODY:.*]] ; TF-SCALABLE: [[VECTOR_BODY]]: -; TF-SCALABLE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] -; TF-SCALABLE-NEXT: [[ACTIVE_LANE_MASK:%.*]] = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64 [[INDEX]], i64 1025) +; TF-SCALABLE-NEXT: [[INDEX1:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; TF-SCALABLE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ] +; TF-SCALABLE-NEXT: [[AVL:%.*]] = sub i64 1025, [[INDEX]] +; TF-SCALABLE-NEXT: [[TMP6:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true) ; TF-SCALABLE-NEXT: [[TMP5:%.*]] = load i64, ptr [[B]], align 8 ; TF-SCALABLE-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[TMP5]], i64 0 ; TF-SCALABLE-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer ; TF-SCALABLE-NEXT: [[TMP7:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDEX]] ; TF-SCALABLE-NEXT: [[TMP8:%.*]] = getelementptr inbounds i64, ptr [[TMP7]], i32 0 -; TF-SCALABLE-NEXT: call void @llvm.masked.store.nxv2i64.p0(<vscale x 2 x i64> [[BROADCAST_SPLAT]], ptr [[TMP8]], i32 8, <vscale x 2 x i1> [[ACTIVE_LANE_MASK]]) -; TF-SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP4]] +; TF-SCALABLE-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[BROADCAST_SPLAT]], ptr align 8 [[TMP8]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP6]]) +; TF-SCALABLE-NEXT: [[TMP10:%.*]] = zext i32 [[TMP6]] to i64 +; TF-SCALABLE-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP10]], [[INDEX]] +; TF-SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX1]], [[TMP4]] ; TF-SCALABLE-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; TF-SCALABLE-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; TF-SCALABLE: [[MIDDLE_BLOCK]]: @@ -126,44 +127,10 @@ define void @uniform_load(ptr noalias nocapture %a, ptr noalias nocapture %b, i6 ; TF-SCALABLE-NEXT: 
store i64 [[V]], ptr [[ARRAYIDX]], align 8 ; TF-SCALABLE-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 ; TF-SCALABLE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1025 -; TF-SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] +; TF-SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; TF-SCALABLE: [[FOR_END]]: ; TF-SCALABLE-NEXT: ret void ; -; TF-FIXEDLEN-LABEL: define void @uniform_load( -; TF-FIXEDLEN-SAME: ptr noalias captures(none) [[A:%.*]], ptr noalias captures(none) [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0:[0-9]+]] { -; TF-FIXEDLEN-NEXT: [[ENTRY:.*]]: -; TF-FIXEDLEN-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] -; TF-FIXEDLEN: [[VECTOR_PH]]: -; TF-FIXEDLEN-NEXT: br label %[[VECTOR_BODY:.*]] -; TF-FIXEDLEN: [[VECTOR_BODY]]: -; TF-FIXEDLEN-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] -; TF-FIXEDLEN-NEXT: [[ACTIVE_LANE_MASK:%.*]] = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i64(i64 [[INDEX]], i64 1025) -; TF-FIXEDLEN-NEXT: [[TMP0:%.*]] = load i64, ptr [[B]], align 8 -; TF-FIXEDLEN-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[TMP0]], i64 0 -; TF-FIXEDLEN-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer -; TF-FIXEDLEN-NEXT: [[TMP1:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDEX]] -; TF-FIXEDLEN-NEXT: [[TMP2:%.*]] = getelementptr inbounds i64, ptr [[TMP1]], i32 0 -; TF-FIXEDLEN-NEXT: call void @llvm.masked.store.v4i64.p0(<4 x i64> [[BROADCAST_SPLAT]], ptr [[TMP2]], i32 8, <4 x i1> [[ACTIVE_LANE_MASK]]) -; TF-FIXEDLEN-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 -; TF-FIXEDLEN-NEXT: [[TMP3:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1028 -; TF-FIXEDLEN-NEXT: br i1 [[TMP3]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] -; TF-FIXEDLEN: [[MIDDLE_BLOCK]]: -; TF-FIXEDLEN-NEXT: br label %[[FOR_END:.*]] -; TF-FIXEDLEN: [[SCALAR_PH]]: -; TF-FIXEDLEN-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ] -; TF-FIXEDLEN-NEXT: br label %[[FOR_BODY:.*]] -; TF-FIXEDLEN: [[FOR_BODY]]: -; TF-FIXEDLEN-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ] -; TF-FIXEDLEN-NEXT: [[V:%.*]] = load i64, ptr [[B]], align 8 -; TF-FIXEDLEN-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] -; TF-FIXEDLEN-NEXT: store i64 [[V]], ptr [[ARRAYIDX]], align 8 -; TF-FIXEDLEN-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; TF-FIXEDLEN-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1025 -; TF-FIXEDLEN-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] -; TF-FIXEDLEN: [[FOR_END]]: -; TF-FIXEDLEN-NEXT: ret void -; entry: br label %for.body @@ -277,22 +244,6 @@ define i64 @uniform_load_outside_use(ptr noalias nocapture %a, ptr noalias nocap ; TF-SCALABLE-NEXT: [[V_LCSSA:%.*]] = phi i64 [ [[V]], %[[FOR_BODY]] ] ; TF-SCALABLE-NEXT: ret i64 [[V_LCSSA]] ; -; TF-FIXEDLEN-LABEL: define i64 @uniform_load_outside_use( -; TF-FIXEDLEN-SAME: ptr noalias captures(none) [[A:%.*]], ptr noalias captures(none) [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] { -; TF-FIXEDLEN-NEXT: [[ENTRY:.*]]: -; TF-FIXEDLEN-NEXT: br label %[[FOR_BODY:.*]] -; TF-FIXEDLEN: [[FOR_BODY]]: -; TF-FIXEDLEN-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ] -; TF-FIXEDLEN-NEXT: [[V:%.*]] 
= load i64, ptr [[B]], align 8 -; TF-FIXEDLEN-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] -; TF-FIXEDLEN-NEXT: store i64 [[V]], ptr [[ARRAYIDX]], align 8 -; TF-FIXEDLEN-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; TF-FIXEDLEN-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1025 -; TF-FIXEDLEN-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END:.*]], label %[[FOR_BODY]] -; TF-FIXEDLEN: [[FOR_END]]: -; TF-FIXEDLEN-NEXT: [[V_LCSSA:%.*]] = phi i64 [ [[V]], %[[FOR_BODY]] ] -; TF-FIXEDLEN-NEXT: ret i64 [[V_LCSSA]] -; entry: br label %for.body @@ -437,25 +388,31 @@ define void @conditional_uniform_load(ptr noalias nocapture %a, ptr noalias noca ; TF-SCALABLE-NEXT: [[TMP5:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64() ; TF-SCALABLE-NEXT: [[TMP6:%.*]] = mul <vscale x 4 x i64> [[TMP5]], splat (i64 1) ; TF-SCALABLE-NEXT: [[INDUCTION:%.*]] = add <vscale x 4 x i64> zeroinitializer, [[TMP6]] -; TF-SCALABLE-NEXT: [[TMP8:%.*]] = mul i64 1, [[TMP4]] -; TF-SCALABLE-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP8]], i64 0 -; TF-SCALABLE-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[DOTSPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer ; TF-SCALABLE-NEXT: br label %[[VECTOR_BODY:.*]] ; TF-SCALABLE: [[VECTOR_BODY]]: -; TF-SCALABLE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; TF-SCALABLE-NEXT: [[INDEX1:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; TF-SCALABLE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ] ; TF-SCALABLE-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i64> [ [[INDUCTION]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ] -; TF-SCALABLE-NEXT: [[ACTIVE_LANE_MASK:%.*]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX]], i64 1025) +; TF-SCALABLE-NEXT: [[AVL:%.*]] = sub i64 1025, [[INDEX]] +; TF-SCALABLE-NEXT: [[TMP7:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true) +; TF-SCALABLE-NEXT: [[TMP11:%.*]] = zext i32 [[TMP7]] to i64 +; TF-SCALABLE-NEXT: [[TMP8:%.*]] = mul i64 1, [[TMP11]] +; TF-SCALABLE-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP8]], i64 0 +; TF-SCALABLE-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[DOTSPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer +; TF-SCALABLE-NEXT: [[ACTIVE_LANE_MASK:%.*]] = icmp ule <vscale x 4 x i64> [[VEC_IND]], splat (i64 1024) ; TF-SCALABLE-NEXT: [[TMP10:%.*]] = icmp ugt <vscale x 4 x i64> [[VEC_IND]], splat (i64 10) ; TF-SCALABLE-NEXT: [[TMP9:%.*]] = select <vscale x 4 x i1> [[ACTIVE_LANE_MASK]], <vscale x 4 x i1> [[TMP10]], <vscale x 4 x i1> zeroinitializer -; TF-SCALABLE-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i64> @llvm.masked.gather.nxv4i64.nxv4p0(<vscale x 4 x ptr> [[BROADCAST_SPLAT]], i32 8, <vscale x 4 x i1> [[TMP9]], <vscale x 4 x i64> poison) +; TF-SCALABLE-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i64> @llvm.vp.gather.nxv4i64.nxv4p0(<vscale x 4 x ptr> align 8 [[BROADCAST_SPLAT]], <vscale x 4 x i1> [[TMP10]], i32 [[TMP7]]) ; TF-SCALABLE-NEXT: [[PREDPHI:%.*]] = select <vscale x 4 x i1> [[TMP9]], <vscale x 4 x i64> [[WIDE_MASKED_GATHER]], <vscale x 4 x i64> zeroinitializer ; TF-SCALABLE-NEXT: [[TMP12:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDEX]] ; TF-SCALABLE-NEXT: [[TMP13:%.*]] = getelementptr inbounds i64, 
ptr [[TMP12]], i32 0 -; TF-SCALABLE-NEXT: call void @llvm.masked.store.nxv4i64.p0(<vscale x 4 x i64> [[PREDPHI]], ptr [[TMP13]], i32 8, <vscale x 4 x i1> [[ACTIVE_LANE_MASK]]) -; TF-SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP4]] +; TF-SCALABLE-NEXT: call void @llvm.vp.store.nxv4i64.p0(<vscale x 4 x i64> [[PREDPHI]], ptr align 8 [[TMP13]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP7]]) +; TF-SCALABLE-NEXT: [[TMP15:%.*]] = zext i32 [[TMP7]] to i64 +; TF-SCALABLE-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP15]], [[INDEX]] +; TF-SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX1]], [[TMP4]] ; TF-SCALABLE-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i64> [[VEC_IND]], [[DOTSPLAT]] ; TF-SCALABLE-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; TF-SCALABLE-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; TF-SCALABLE-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; TF-SCALABLE: [[MIDDLE_BLOCK]]: ; TF-SCALABLE-NEXT: br label %[[FOR_END:.*]] ; TF-SCALABLE: [[SCALAR_PH]]: @@ -474,55 +431,10 @@ define void @conditional_uniform_load(ptr noalias nocapture %a, ptr noalias noca ; TF-SCALABLE-NEXT: store i64 [[PHI]], ptr [[ARRAYIDX]], align 8 ; TF-SCALABLE-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 ; TF-SCALABLE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1025 -; TF-SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] +; TF-SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; TF-SCALABLE: [[FOR_END]]: ; TF-SCALABLE-NEXT: ret void ; -; TF-FIXEDLEN-LABEL: define void @conditional_uniform_load( -; TF-FIXEDLEN-SAME: ptr noalias captures(none) [[A:%.*]], ptr noalias captures(none) [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] { -; TF-FIXEDLEN-NEXT: [[ENTRY:.*]]: -; TF-FIXEDLEN-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] -; TF-FIXEDLEN: [[VECTOR_PH]]: -; TF-FIXEDLEN-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x ptr> poison, ptr [[B]], i64 0 -; TF-FIXEDLEN-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x ptr> [[BROADCAST_SPLATINSERT]], <4 x ptr> poison, <4 x i32> zeroinitializer -; TF-FIXEDLEN-NEXT: br label %[[VECTOR_BODY:.*]] -; TF-FIXEDLEN: [[VECTOR_BODY]]: -; TF-FIXEDLEN-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] -; TF-FIXEDLEN-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ] -; TF-FIXEDLEN-NEXT: [[ACTIVE_LANE_MASK:%.*]] = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i64(i64 [[INDEX]], i64 1025) -; TF-FIXEDLEN-NEXT: [[TMP1:%.*]] = icmp ugt <4 x i64> [[VEC_IND]], splat (i64 10) -; TF-FIXEDLEN-NEXT: [[TMP2:%.*]] = select <4 x i1> [[ACTIVE_LANE_MASK]], <4 x i1> [[TMP1]], <4 x i1> zeroinitializer -; TF-FIXEDLEN-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <4 x i64> @llvm.masked.gather.v4i64.v4p0(<4 x ptr> [[BROADCAST_SPLAT]], i32 8, <4 x i1> [[TMP2]], <4 x i64> poison) -; TF-FIXEDLEN-NEXT: [[PREDPHI:%.*]] = select <4 x i1> [[TMP2]], <4 x i64> [[WIDE_MASKED_GATHER]], <4 x i64> zeroinitializer -; TF-FIXEDLEN-NEXT: [[TMP3:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDEX]] -; TF-FIXEDLEN-NEXT: [[TMP4:%.*]] = getelementptr inbounds i64, ptr [[TMP3]], i32 0 -; TF-FIXEDLEN-NEXT: call void @llvm.masked.store.v4i64.p0(<4 x i64> [[PREDPHI]], ptr [[TMP4]], i32 8, <4 x i1> 
[[ACTIVE_LANE_MASK]]) -; TF-FIXEDLEN-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 -; TF-FIXEDLEN-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 4) -; TF-FIXEDLEN-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1028 -; TF-FIXEDLEN-NEXT: br i1 [[TMP5]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] -; TF-FIXEDLEN: [[MIDDLE_BLOCK]]: -; TF-FIXEDLEN-NEXT: br label %[[FOR_END:.*]] -; TF-FIXEDLEN: [[SCALAR_PH]]: -; TF-FIXEDLEN-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ] -; TF-FIXEDLEN-NEXT: br label %[[FOR_BODY:.*]] -; TF-FIXEDLEN: [[FOR_BODY]]: -; TF-FIXEDLEN-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LATCH:.*]] ] -; TF-FIXEDLEN-NEXT: [[CMP:%.*]] = icmp ugt i64 [[IV]], 10 -; TF-FIXEDLEN-NEXT: br i1 [[CMP]], label %[[DO_LOAD:.*]], label %[[LATCH]] -; TF-FIXEDLEN: [[DO_LOAD]]: -; TF-FIXEDLEN-NEXT: [[V:%.*]] = load i64, ptr [[B]], align 8 -; TF-FIXEDLEN-NEXT: br label %[[LATCH]] -; TF-FIXEDLEN: [[LATCH]]: -; TF-FIXEDLEN-NEXT: [[PHI:%.*]] = phi i64 [ 0, %[[FOR_BODY]] ], [ [[V]], %[[DO_LOAD]] ] -; TF-FIXEDLEN-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] -; TF-FIXEDLEN-NEXT: store i64 [[PHI]], ptr [[ARRAYIDX]], align 8 -; TF-FIXEDLEN-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; TF-FIXEDLEN-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1025 -; TF-FIXEDLEN-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] -; TF-FIXEDLEN: [[FOR_END]]: -; TF-FIXEDLEN-NEXT: ret void -; entry: br label %for.body @@ -640,17 +552,21 @@ define void @uniform_load_unaligned(ptr noalias nocapture %a, ptr noalias nocapt ; TF-SCALABLE-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 2 ; TF-SCALABLE-NEXT: br label %[[VECTOR_BODY:.*]] ; TF-SCALABLE: [[VECTOR_BODY]]: -; TF-SCALABLE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] -; TF-SCALABLE-NEXT: [[ACTIVE_LANE_MASK:%.*]] = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64 [[INDEX]], i64 1025) +; TF-SCALABLE-NEXT: [[INDEX1:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; TF-SCALABLE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ] +; TF-SCALABLE-NEXT: [[AVL:%.*]] = sub i64 1025, [[INDEX]] +; TF-SCALABLE-NEXT: [[TMP6:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true) ; TF-SCALABLE-NEXT: [[TMP5:%.*]] = load i64, ptr [[B]], align 1 ; TF-SCALABLE-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[TMP5]], i64 0 ; TF-SCALABLE-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer ; TF-SCALABLE-NEXT: [[TMP7:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDEX]] ; TF-SCALABLE-NEXT: [[TMP8:%.*]] = getelementptr inbounds i64, ptr [[TMP7]], i32 0 -; TF-SCALABLE-NEXT: call void @llvm.masked.store.nxv2i64.p0(<vscale x 2 x i64> [[BROADCAST_SPLAT]], ptr [[TMP8]], i32 8, <vscale x 2 x i1> [[ACTIVE_LANE_MASK]]) -; TF-SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP4]] +; TF-SCALABLE-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[BROADCAST_SPLAT]], ptr align 8 [[TMP8]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP6]]) +; TF-SCALABLE-NEXT: [[TMP10:%.*]] = zext i32 [[TMP6]] to i64 +; TF-SCALABLE-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP10]], [[INDEX]] +; 
TF-SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX1]], [[TMP4]] ; TF-SCALABLE-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; TF-SCALABLE-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] +; TF-SCALABLE-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; TF-SCALABLE: [[MIDDLE_BLOCK]]: ; TF-SCALABLE-NEXT: br label %[[FOR_END:.*]] ; TF-SCALABLE: [[SCALAR_PH]]: @@ -663,44 +579,10 @@ define void @uniform_load_unaligned(ptr noalias nocapture %a, ptr noalias nocapt ; TF-SCALABLE-NEXT: store i64 [[V]], ptr [[ARRAYIDX]], align 8 ; TF-SCALABLE-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 ; TF-SCALABLE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1025 -; TF-SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] +; TF-SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; TF-SCALABLE: [[FOR_END]]: ; TF-SCALABLE-NEXT: ret void ; -; TF-FIXEDLEN-LABEL: define void @uniform_load_unaligned( -; TF-FIXEDLEN-SAME: ptr noalias captures(none) [[A:%.*]], ptr noalias captures(none) [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] { -; TF-FIXEDLEN-NEXT: [[ENTRY:.*]]: -; TF-FIXEDLEN-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] -; TF-FIXEDLEN: [[VECTOR_PH]]: -; TF-FIXEDLEN-NEXT: br label %[[VECTOR_BODY:.*]] -; TF-FIXEDLEN: [[VECTOR_BODY]]: -; TF-FIXEDLEN-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] -; TF-FIXEDLEN-NEXT: [[ACTIVE_LANE_MASK:%.*]] = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i64(i64 [[INDEX]], i64 1025) -; TF-FIXEDLEN-NEXT: [[TMP0:%.*]] = load i64, ptr [[B]], align 1 -; TF-FIXEDLEN-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[TMP0]], i64 0 -; TF-FIXEDLEN-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer -; TF-FIXEDLEN-NEXT: [[TMP1:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDEX]] -; TF-FIXEDLEN-NEXT: [[TMP2:%.*]] = getelementptr inbounds i64, ptr [[TMP1]], i32 0 -; TF-FIXEDLEN-NEXT: call void @llvm.masked.store.v4i64.p0(<4 x i64> [[BROADCAST_SPLAT]], ptr [[TMP2]], i32 8, <4 x i1> [[ACTIVE_LANE_MASK]]) -; TF-FIXEDLEN-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 -; TF-FIXEDLEN-NEXT: [[TMP3:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1028 -; TF-FIXEDLEN-NEXT: br i1 [[TMP3]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] -; TF-FIXEDLEN: [[MIDDLE_BLOCK]]: -; TF-FIXEDLEN-NEXT: br label %[[FOR_END:.*]] -; TF-FIXEDLEN: [[SCALAR_PH]]: -; TF-FIXEDLEN-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ] -; TF-FIXEDLEN-NEXT: br label %[[FOR_BODY:.*]] -; TF-FIXEDLEN: [[FOR_BODY]]: -; TF-FIXEDLEN-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ] -; TF-FIXEDLEN-NEXT: [[V:%.*]] = load i64, ptr [[B]], align 1 -; TF-FIXEDLEN-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] -; TF-FIXEDLEN-NEXT: store i64 [[V]], ptr [[ARRAYIDX]], align 8 -; TF-FIXEDLEN-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; TF-FIXEDLEN-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1025 -; TF-FIXEDLEN-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] -; TF-FIXEDLEN: [[FOR_END]]: -; TF-FIXEDLEN-NEXT: ret void -; entry: br label %for.body @@ -813,15 +695,19 @@ 
define void @uniform_store(ptr noalias nocapture %a, ptr noalias nocapture %b, i ; TF-SCALABLE-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer ; TF-SCALABLE-NEXT: br label %[[VECTOR_BODY:.*]] ; TF-SCALABLE: [[VECTOR_BODY]]: -; TF-SCALABLE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] -; TF-SCALABLE-NEXT: [[ACTIVE_LANE_MASK:%.*]] = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64 [[INDEX]], i64 1025) +; TF-SCALABLE-NEXT: [[INDEX1:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; TF-SCALABLE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ] +; TF-SCALABLE-NEXT: [[AVL:%.*]] = sub i64 1025, [[INDEX]] +; TF-SCALABLE-NEXT: [[TMP5:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true) ; TF-SCALABLE-NEXT: store i64 [[V]], ptr [[B]], align 8 ; TF-SCALABLE-NEXT: [[TMP6:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDEX]] ; TF-SCALABLE-NEXT: [[TMP7:%.*]] = getelementptr inbounds i64, ptr [[TMP6]], i32 0 -; TF-SCALABLE-NEXT: call void @llvm.masked.store.nxv2i64.p0(<vscale x 2 x i64> [[BROADCAST_SPLAT]], ptr [[TMP7]], i32 8, <vscale x 2 x i1> [[ACTIVE_LANE_MASK]]) -; TF-SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP4]] +; TF-SCALABLE-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[BROADCAST_SPLAT]], ptr align 8 [[TMP7]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP5]]) +; TF-SCALABLE-NEXT: [[TMP9:%.*]] = zext i32 [[TMP5]] to i64 +; TF-SCALABLE-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP9]], [[INDEX]] +; TF-SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX1]], [[TMP4]] ; TF-SCALABLE-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; TF-SCALABLE-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] +; TF-SCALABLE-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] ; TF-SCALABLE: [[MIDDLE_BLOCK]]: ; TF-SCALABLE-NEXT: br label %[[FOR_END:.*]] ; TF-SCALABLE: [[SCALAR_PH]]: @@ -834,44 +720,10 @@ define void @uniform_store(ptr noalias nocapture %a, ptr noalias nocapture %b, i ; TF-SCALABLE-NEXT: store i64 [[V]], ptr [[ARRAYIDX]], align 8 ; TF-SCALABLE-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 ; TF-SCALABLE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1025 -; TF-SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] +; TF-SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] ; TF-SCALABLE: [[FOR_END]]: ; TF-SCALABLE-NEXT: ret void ; -; TF-FIXEDLEN-LABEL: define void @uniform_store( -; TF-FIXEDLEN-SAME: ptr noalias captures(none) [[A:%.*]], ptr noalias captures(none) [[B:%.*]], i64 [[V:%.*]], i64 [[N:%.*]]) #[[ATTR0]] { -; TF-FIXEDLEN-NEXT: [[ENTRY:.*]]: -; TF-FIXEDLEN-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] -; TF-FIXEDLEN: [[VECTOR_PH]]: -; TF-FIXEDLEN-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[V]], i64 0 -; TF-FIXEDLEN-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer -; TF-FIXEDLEN-NEXT: br label %[[VECTOR_BODY:.*]] -; TF-FIXEDLEN: [[VECTOR_BODY]]: -; TF-FIXEDLEN-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ 
[[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] -; TF-FIXEDLEN-NEXT: [[ACTIVE_LANE_MASK:%.*]] = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i64(i64 [[INDEX]], i64 1025) -; TF-FIXEDLEN-NEXT: store i64 [[V]], ptr [[B]], align 8 -; TF-FIXEDLEN-NEXT: [[TMP1:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDEX]] -; TF-FIXEDLEN-NEXT: [[TMP2:%.*]] = getelementptr inbounds i64, ptr [[TMP1]], i32 0 -; TF-FIXEDLEN-NEXT: call void @llvm.masked.store.v4i64.p0(<4 x i64> [[BROADCAST_SPLAT]], ptr [[TMP2]], i32 8, <4 x i1> [[ACTIVE_LANE_MASK]]) -; TF-FIXEDLEN-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 -; TF-FIXEDLEN-NEXT: [[TMP3:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1028 -; TF-FIXEDLEN-NEXT: br i1 [[TMP3]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] -; TF-FIXEDLEN: [[MIDDLE_BLOCK]]: -; TF-FIXEDLEN-NEXT: br label %[[FOR_END:.*]] -; TF-FIXEDLEN: [[SCALAR_PH]]: -; TF-FIXEDLEN-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ] -; TF-FIXEDLEN-NEXT: br label %[[FOR_BODY:.*]] -; TF-FIXEDLEN: [[FOR_BODY]]: -; TF-FIXEDLEN-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ] -; TF-FIXEDLEN-NEXT: store i64 [[V]], ptr [[B]], align 8 -; TF-FIXEDLEN-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] -; TF-FIXEDLEN-NEXT: store i64 [[V]], ptr [[ARRAYIDX]], align 8 -; TF-FIXEDLEN-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; TF-FIXEDLEN-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1025 -; TF-FIXEDLEN-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] -; TF-FIXEDLEN: [[FOR_END]]: -; TF-FIXEDLEN-NEXT: ret void -; entry: br label %for.body @@ -1003,22 +855,27 @@ define void @uniform_store_of_loop_varying(ptr noalias nocapture %a, ptr noalias ; TF-SCALABLE-NEXT: [[TMP5:%.*]] = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64() ; TF-SCALABLE-NEXT: [[TMP7:%.*]] = mul <vscale x 2 x i64> [[TMP5]], splat (i64 1) ; TF-SCALABLE-NEXT: [[INDUCTION:%.*]] = add <vscale x 2 x i64> zeroinitializer, [[TMP7]] -; TF-SCALABLE-NEXT: [[TMP8:%.*]] = mul i64 1, [[TMP4]] -; TF-SCALABLE-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[TMP8]], i64 0 -; TF-SCALABLE-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <vscale x 2 x i64> [[BROADCAST_SPLATINSERT1]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer ; TF-SCALABLE-NEXT: br label %[[VECTOR_BODY:.*]] ; TF-SCALABLE: [[VECTOR_BODY]]: -; TF-SCALABLE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; TF-SCALABLE-NEXT: [[INDEX1:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; TF-SCALABLE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ] ; TF-SCALABLE-NEXT: [[VEC_IND:%.*]] = phi <vscale x 2 x i64> [ [[INDUCTION]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ] -; TF-SCALABLE-NEXT: [[ACTIVE_LANE_MASK:%.*]] = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64 [[INDEX]], i64 1025) -; TF-SCALABLE-NEXT: call void @llvm.masked.scatter.nxv2i64.nxv2p0(<vscale x 2 x i64> [[VEC_IND]], <vscale x 2 x ptr> [[BROADCAST_SPLAT]], i32 8, <vscale x 2 x i1> [[ACTIVE_LANE_MASK]]) +; TF-SCALABLE-NEXT: [[AVL:%.*]] = sub i64 1025, [[INDEX]] +; TF-SCALABLE-NEXT: [[TMP9:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true) +; TF-SCALABLE-NEXT: [[TMP13:%.*]] = zext i32 [[TMP9]] to i64 +; TF-SCALABLE-NEXT: [[TMP8:%.*]] = 
mul i64 1, [[TMP13]] +; TF-SCALABLE-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[TMP8]], i64 0 +; TF-SCALABLE-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <vscale x 2 x i64> [[BROADCAST_SPLATINSERT1]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer +; TF-SCALABLE-NEXT: call void @llvm.vp.scatter.nxv2i64.nxv2p0(<vscale x 2 x i64> [[VEC_IND]], <vscale x 2 x ptr> align 8 [[BROADCAST_SPLAT]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP9]]) ; TF-SCALABLE-NEXT: [[TMP10:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDEX]] ; TF-SCALABLE-NEXT: [[TMP11:%.*]] = getelementptr inbounds i64, ptr [[TMP10]], i32 0 -; TF-SCALABLE-NEXT: call void @llvm.masked.store.nxv2i64.p0(<vscale x 2 x i64> [[BROADCAST_SPLAT3]], ptr [[TMP11]], i32 8, <vscale x 2 x i1> [[ACTIVE_LANE_MASK]]) -; TF-SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP4]] +; TF-SCALABLE-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[BROADCAST_SPLAT3]], ptr align 8 [[TMP11]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP9]]) +; TF-SCALABLE-NEXT: [[TMP14:%.*]] = zext i32 [[TMP9]] to i64 +; TF-SCALABLE-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP14]], [[INDEX]] +; TF-SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX1]], [[TMP4]] ; TF-SCALABLE-NEXT: [[VEC_IND_NEXT]] = add <vscale x 2 x i64> [[VEC_IND]], [[BROADCAST_SPLAT2]] ; TF-SCALABLE-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; TF-SCALABLE-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] +; TF-SCALABLE-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] ; TF-SCALABLE: [[MIDDLE_BLOCK]]: ; TF-SCALABLE-NEXT: br label %[[FOR_END:.*]] ; TF-SCALABLE: [[SCALAR_PH]]: @@ -1031,71 +888,10 @@ define void @uniform_store_of_loop_varying(ptr noalias nocapture %a, ptr noalias ; TF-SCALABLE-NEXT: store i64 [[V]], ptr [[ARRAYIDX]], align 8 ; TF-SCALABLE-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 ; TF-SCALABLE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1025 -; TF-SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] +; TF-SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] ; TF-SCALABLE: [[FOR_END]]: ; TF-SCALABLE-NEXT: ret void ; -; TF-FIXEDLEN-LABEL: define void @uniform_store_of_loop_varying( -; TF-FIXEDLEN-SAME: ptr noalias captures(none) [[A:%.*]], ptr noalias captures(none) [[B:%.*]], i64 [[V:%.*]], i64 [[N:%.*]]) #[[ATTR0]] { -; TF-FIXEDLEN-NEXT: [[ENTRY:.*]]: -; TF-FIXEDLEN-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] -; TF-FIXEDLEN: [[VECTOR_PH]]: -; TF-FIXEDLEN-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[V]], i64 0 -; TF-FIXEDLEN-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer -; TF-FIXEDLEN-NEXT: br label %[[VECTOR_BODY:.*]] -; TF-FIXEDLEN: [[VECTOR_BODY]]: -; TF-FIXEDLEN-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[PRED_STORE_CONTINUE6:.*]] ] -; TF-FIXEDLEN-NEXT: [[ACTIVE_LANE_MASK:%.*]] = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i64(i64 [[INDEX]], i64 1025) -; TF-FIXEDLEN-NEXT: [[TMP0:%.*]] = extractelement <4 x i1> [[ACTIVE_LANE_MASK]], i32 0 -; TF-FIXEDLEN-NEXT: br i1 [[TMP0]], label %[[PRED_STORE_IF:.*]], label %[[PRED_STORE_CONTINUE:.*]] -; TF-FIXEDLEN: [[PRED_STORE_IF]]: -; 
TF-FIXEDLEN-NEXT: [[TMP1:%.*]] = add i64 [[INDEX]], 0 -; TF-FIXEDLEN-NEXT: store i64 [[TMP1]], ptr [[B]], align 8 -; TF-FIXEDLEN-NEXT: br label %[[PRED_STORE_CONTINUE]] -; TF-FIXEDLEN: [[PRED_STORE_CONTINUE]]: -; TF-FIXEDLEN-NEXT: [[TMP2:%.*]] = extractelement <4 x i1> [[ACTIVE_LANE_MASK]], i32 1 -; TF-FIXEDLEN-NEXT: br i1 [[TMP2]], label %[[PRED_STORE_IF1:.*]], label %[[PRED_STORE_CONTINUE2:.*]] -; TF-FIXEDLEN: [[PRED_STORE_IF1]]: -; TF-FIXEDLEN-NEXT: [[TMP3:%.*]] = add i64 [[INDEX]], 1 -; TF-FIXEDLEN-NEXT: store i64 [[TMP3]], ptr [[B]], align 8 -; TF-FIXEDLEN-NEXT: br label %[[PRED_STORE_CONTINUE2]] -; TF-FIXEDLEN: [[PRED_STORE_CONTINUE2]]: -; TF-FIXEDLEN-NEXT: [[TMP4:%.*]] = extractelement <4 x i1> [[ACTIVE_LANE_MASK]], i32 2 -; TF-FIXEDLEN-NEXT: br i1 [[TMP4]], label %[[PRED_STORE_IF3:.*]], label %[[PRED_STORE_CONTINUE4:.*]] -; TF-FIXEDLEN: [[PRED_STORE_IF3]]: -; TF-FIXEDLEN-NEXT: [[TMP5:%.*]] = add i64 [[INDEX]], 2 -; TF-FIXEDLEN-NEXT: store i64 [[TMP5]], ptr [[B]], align 8 -; TF-FIXEDLEN-NEXT: br label %[[PRED_STORE_CONTINUE4]] -; TF-FIXEDLEN: [[PRED_STORE_CONTINUE4]]: -; TF-FIXEDLEN-NEXT: [[TMP6:%.*]] = extractelement <4 x i1> [[ACTIVE_LANE_MASK]], i32 3 -; TF-FIXEDLEN-NEXT: br i1 [[TMP6]], label %[[PRED_STORE_IF5:.*]], label %[[PRED_STORE_CONTINUE6]] -; TF-FIXEDLEN: [[PRED_STORE_IF5]]: -; TF-FIXEDLEN-NEXT: [[TMP7:%.*]] = add i64 [[INDEX]], 3 -; TF-FIXEDLEN-NEXT: store i64 [[TMP7]], ptr [[B]], align 8 -; TF-FIXEDLEN-NEXT: br label %[[PRED_STORE_CONTINUE6]] -; TF-FIXEDLEN: [[PRED_STORE_CONTINUE6]]: -; TF-FIXEDLEN-NEXT: [[TMP8:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDEX]] -; TF-FIXEDLEN-NEXT: [[TMP9:%.*]] = getelementptr inbounds i64, ptr [[TMP8]], i32 0 -; TF-FIXEDLEN-NEXT: call void @llvm.masked.store.v4i64.p0(<4 x i64> [[BROADCAST_SPLAT]], ptr [[TMP9]], i32 8, <4 x i1> [[ACTIVE_LANE_MASK]]) -; TF-FIXEDLEN-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 -; TF-FIXEDLEN-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1028 -; TF-FIXEDLEN-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] -; TF-FIXEDLEN: [[MIDDLE_BLOCK]]: -; TF-FIXEDLEN-NEXT: br label %[[FOR_END:.*]] -; TF-FIXEDLEN: [[SCALAR_PH]]: -; TF-FIXEDLEN-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ] -; TF-FIXEDLEN-NEXT: br label %[[FOR_BODY:.*]] -; TF-FIXEDLEN: [[FOR_BODY]]: -; TF-FIXEDLEN-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ] -; TF-FIXEDLEN-NEXT: store i64 [[IV]], ptr [[B]], align 8 -; TF-FIXEDLEN-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] -; TF-FIXEDLEN-NEXT: store i64 [[V]], ptr [[ARRAYIDX]], align 8 -; TF-FIXEDLEN-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; TF-FIXEDLEN-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1025 -; TF-FIXEDLEN-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] -; TF-FIXEDLEN: [[FOR_END]]: -; TF-FIXEDLEN-NEXT: ret void -; entry: br label %for.body @@ -1240,24 +1036,28 @@ define void @conditional_uniform_store(ptr noalias nocapture %a, ptr noalias noc ; TF-SCALABLE-NEXT: [[TMP5:%.*]] = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64() ; TF-SCALABLE-NEXT: [[TMP7:%.*]] = mul <vscale x 2 x i64> [[TMP5]], splat (i64 1) ; TF-SCALABLE-NEXT: [[INDUCTION:%.*]] = add <vscale x 2 x i64> zeroinitializer, [[TMP7]] -; TF-SCALABLE-NEXT: [[TMP8:%.*]] = mul i64 1, [[TMP4]] -; TF-SCALABLE-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[TMP8]], i64 
0 -; TF-SCALABLE-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer ; TF-SCALABLE-NEXT: br label %[[VECTOR_BODY:.*]] ; TF-SCALABLE: [[VECTOR_BODY]]: -; TF-SCALABLE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; TF-SCALABLE-NEXT: [[INDEX1:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; TF-SCALABLE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ] ; TF-SCALABLE-NEXT: [[VEC_IND:%.*]] = phi <vscale x 2 x i64> [ [[INDUCTION]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ] -; TF-SCALABLE-NEXT: [[ACTIVE_LANE_MASK:%.*]] = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64 [[INDEX]], i64 1025) +; TF-SCALABLE-NEXT: [[AVL:%.*]] = sub i64 1025, [[INDEX]] +; TF-SCALABLE-NEXT: [[TMP9:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true) +; TF-SCALABLE-NEXT: [[TMP11:%.*]] = zext i32 [[TMP9]] to i64 +; TF-SCALABLE-NEXT: [[TMP8:%.*]] = mul i64 1, [[TMP11]] +; TF-SCALABLE-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[TMP8]], i64 0 +; TF-SCALABLE-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer ; TF-SCALABLE-NEXT: [[TMP10:%.*]] = icmp ugt <vscale x 2 x i64> [[VEC_IND]], splat (i64 10) -; TF-SCALABLE-NEXT: [[TMP9:%.*]] = select <vscale x 2 x i1> [[ACTIVE_LANE_MASK]], <vscale x 2 x i1> [[TMP10]], <vscale x 2 x i1> zeroinitializer -; TF-SCALABLE-NEXT: call void @llvm.masked.scatter.nxv2i64.nxv2p0(<vscale x 2 x i64> [[BROADCAST_SPLAT1]], <vscale x 2 x ptr> [[BROADCAST_SPLAT2]], i32 8, <vscale x 2 x i1> [[TMP9]]) +; TF-SCALABLE-NEXT: call void @llvm.vp.scatter.nxv2i64.nxv2p0(<vscale x 2 x i64> [[BROADCAST_SPLAT1]], <vscale x 2 x ptr> align 8 [[BROADCAST_SPLAT2]], <vscale x 2 x i1> [[TMP10]], i32 [[TMP9]]) ; TF-SCALABLE-NEXT: [[TMP12:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDEX]] ; TF-SCALABLE-NEXT: [[TMP13:%.*]] = getelementptr inbounds i64, ptr [[TMP12]], i32 0 -; TF-SCALABLE-NEXT: call void @llvm.masked.store.nxv2i64.p0(<vscale x 2 x i64> [[BROADCAST_SPLAT1]], ptr [[TMP13]], i32 8, <vscale x 2 x i1> [[ACTIVE_LANE_MASK]]) -; TF-SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP4]] +; TF-SCALABLE-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[BROADCAST_SPLAT1]], ptr align 8 [[TMP13]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP9]]) +; TF-SCALABLE-NEXT: [[TMP15:%.*]] = zext i32 [[TMP9]] to i64 +; TF-SCALABLE-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP15]], [[INDEX]] +; TF-SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX1]], [[TMP4]] ; TF-SCALABLE-NEXT: [[VEC_IND_NEXT]] = add <vscale x 2 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]] ; TF-SCALABLE-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; TF-SCALABLE-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] +; TF-SCALABLE-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]] ; TF-SCALABLE: [[MIDDLE_BLOCK]]: ; TF-SCALABLE-NEXT: br label %[[FOR_END:.*]] ; TF-SCALABLE: [[SCALAR_PH]]: @@ -1275,55 +1075,10 @@ define void @conditional_uniform_store(ptr noalias nocapture %a, ptr noalias noc ; TF-SCALABLE-NEXT: store i64 [[V]], ptr [[ARRAYIDX]], align 8 ; TF-SCALABLE-NEXT: [[IV_NEXT]] 
= add nuw nsw i64 [[IV]], 1 ; TF-SCALABLE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1025 -; TF-SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]] +; TF-SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] ; TF-SCALABLE: [[FOR_END]]: ; TF-SCALABLE-NEXT: ret void ; -; TF-FIXEDLEN-LABEL: define void @conditional_uniform_store( -; TF-FIXEDLEN-SAME: ptr noalias captures(none) [[A:%.*]], ptr noalias captures(none) [[B:%.*]], i64 [[V:%.*]], i64 [[N:%.*]]) #[[ATTR0]] { -; TF-FIXEDLEN-NEXT: [[ENTRY:.*]]: -; TF-FIXEDLEN-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] -; TF-FIXEDLEN: [[VECTOR_PH]]: -; TF-FIXEDLEN-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[V]], i64 0 -; TF-FIXEDLEN-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer -; TF-FIXEDLEN-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <4 x ptr> poison, ptr [[B]], i64 0 -; TF-FIXEDLEN-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <4 x ptr> [[BROADCAST_SPLATINSERT1]], <4 x ptr> poison, <4 x i32> zeroinitializer -; TF-FIXEDLEN-NEXT: br label %[[VECTOR_BODY:.*]] -; TF-FIXEDLEN: [[VECTOR_BODY]]: -; TF-FIXEDLEN-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] -; TF-FIXEDLEN-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ] -; TF-FIXEDLEN-NEXT: [[ACTIVE_LANE_MASK:%.*]] = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i64(i64 [[INDEX]], i64 1025) -; TF-FIXEDLEN-NEXT: [[TMP0:%.*]] = icmp ugt <4 x i64> [[VEC_IND]], splat (i64 10) -; TF-FIXEDLEN-NEXT: [[TMP1:%.*]] = select <4 x i1> [[ACTIVE_LANE_MASK]], <4 x i1> [[TMP0]], <4 x i1> zeroinitializer -; TF-FIXEDLEN-NEXT: call void @llvm.masked.scatter.v4i64.v4p0(<4 x i64> [[BROADCAST_SPLAT]], <4 x ptr> [[BROADCAST_SPLAT2]], i32 8, <4 x i1> [[TMP1]]) -; TF-FIXEDLEN-NEXT: [[TMP3:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDEX]] -; TF-FIXEDLEN-NEXT: [[TMP4:%.*]] = getelementptr inbounds i64, ptr [[TMP3]], i32 0 -; TF-FIXEDLEN-NEXT: call void @llvm.masked.store.v4i64.p0(<4 x i64> [[BROADCAST_SPLAT]], ptr [[TMP4]], i32 8, <4 x i1> [[ACTIVE_LANE_MASK]]) -; TF-FIXEDLEN-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 -; TF-FIXEDLEN-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 4) -; TF-FIXEDLEN-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1028 -; TF-FIXEDLEN-NEXT: br i1 [[TMP5]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] -; TF-FIXEDLEN: [[MIDDLE_BLOCK]]: -; TF-FIXEDLEN-NEXT: br label %[[FOR_END:.*]] -; TF-FIXEDLEN: [[SCALAR_PH]]: -; TF-FIXEDLEN-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ] -; TF-FIXEDLEN-NEXT: br label %[[FOR_BODY:.*]] -; TF-FIXEDLEN: [[FOR_BODY]]: -; TF-FIXEDLEN-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LATCH:.*]] ] -; TF-FIXEDLEN-NEXT: [[CMP:%.*]] = icmp ugt i64 [[IV]], 10 -; TF-FIXEDLEN-NEXT: br i1 [[CMP]], label %[[DO_STORE:.*]], label %[[LATCH]] -; TF-FIXEDLEN: [[DO_STORE]]: -; TF-FIXEDLEN-NEXT: store i64 [[V]], ptr [[B]], align 8 -; TF-FIXEDLEN-NEXT: br label %[[LATCH]] -; TF-FIXEDLEN: [[LATCH]]: -; TF-FIXEDLEN-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] -; TF-FIXEDLEN-NEXT: store i64 [[V]], ptr [[ARRAYIDX]], align 8 -; TF-FIXEDLEN-NEXT: [[IV_NEXT]] = add 
nuw nsw i64 [[IV]], 1 -; TF-FIXEDLEN-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1025 -; TF-FIXEDLEN-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]] -; TF-FIXEDLEN: [[FOR_END]]: -; TF-FIXEDLEN-NEXT: ret void -; entry: br label %for.body @@ -1442,15 +1197,19 @@ define void @uniform_store_unaligned(ptr noalias nocapture %a, ptr noalias nocap ; TF-SCALABLE-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer ; TF-SCALABLE-NEXT: br label %[[VECTOR_BODY:.*]] ; TF-SCALABLE: [[VECTOR_BODY]]: -; TF-SCALABLE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] -; TF-SCALABLE-NEXT: [[ACTIVE_LANE_MASK:%.*]] = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64 [[INDEX]], i64 1025) +; TF-SCALABLE-NEXT: [[INDEX1:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; TF-SCALABLE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ] +; TF-SCALABLE-NEXT: [[AVL:%.*]] = sub i64 1025, [[INDEX]] +; TF-SCALABLE-NEXT: [[TMP5:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true) ; TF-SCALABLE-NEXT: store i64 [[V]], ptr [[B]], align 1 ; TF-SCALABLE-NEXT: [[TMP6:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDEX]] ; TF-SCALABLE-NEXT: [[TMP7:%.*]] = getelementptr inbounds i64, ptr [[TMP6]], i32 0 -; TF-SCALABLE-NEXT: call void @llvm.masked.store.nxv2i64.p0(<vscale x 2 x i64> [[BROADCAST_SPLAT]], ptr [[TMP7]], i32 8, <vscale x 2 x i1> [[ACTIVE_LANE_MASK]]) -; TF-SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP4]] +; TF-SCALABLE-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[BROADCAST_SPLAT]], ptr align 8 [[TMP7]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP5]]) +; TF-SCALABLE-NEXT: [[TMP9:%.*]] = zext i32 [[TMP5]] to i64 +; TF-SCALABLE-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP9]], [[INDEX]] +; TF-SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX1]], [[TMP4]] ; TF-SCALABLE-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; TF-SCALABLE-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] +; TF-SCALABLE-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]] ; TF-SCALABLE: [[MIDDLE_BLOCK]]: ; TF-SCALABLE-NEXT: br label %[[FOR_END:.*]] ; TF-SCALABLE: [[SCALAR_PH]]: @@ -1463,44 +1222,10 @@ define void @uniform_store_unaligned(ptr noalias nocapture %a, ptr noalias nocap ; TF-SCALABLE-NEXT: store i64 [[V]], ptr [[ARRAYIDX]], align 8 ; TF-SCALABLE-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 ; TF-SCALABLE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1025 -; TF-SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]] +; TF-SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] ; TF-SCALABLE: [[FOR_END]]: ; TF-SCALABLE-NEXT: ret void ; -; TF-FIXEDLEN-LABEL: define void @uniform_store_unaligned( -; TF-FIXEDLEN-SAME: ptr noalias captures(none) [[A:%.*]], ptr noalias captures(none) [[B:%.*]], i64 [[V:%.*]], i64 [[N:%.*]]) #[[ATTR0]] { -; TF-FIXEDLEN-NEXT: [[ENTRY:.*]]: -; TF-FIXEDLEN-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] -; TF-FIXEDLEN: [[VECTOR_PH]]: -; TF-FIXEDLEN-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement 
<4 x i64> poison, i64 [[V]], i64 0 -; TF-FIXEDLEN-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer -; TF-FIXEDLEN-NEXT: br label %[[VECTOR_BODY:.*]] -; TF-FIXEDLEN: [[VECTOR_BODY]]: -; TF-FIXEDLEN-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] -; TF-FIXEDLEN-NEXT: [[ACTIVE_LANE_MASK:%.*]] = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i64(i64 [[INDEX]], i64 1025) -; TF-FIXEDLEN-NEXT: store i64 [[V]], ptr [[B]], align 1 -; TF-FIXEDLEN-NEXT: [[TMP1:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDEX]] -; TF-FIXEDLEN-NEXT: [[TMP2:%.*]] = getelementptr inbounds i64, ptr [[TMP1]], i32 0 -; TF-FIXEDLEN-NEXT: call void @llvm.masked.store.v4i64.p0(<4 x i64> [[BROADCAST_SPLAT]], ptr [[TMP2]], i32 8, <4 x i1> [[ACTIVE_LANE_MASK]]) -; TF-FIXEDLEN-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 -; TF-FIXEDLEN-NEXT: [[TMP3:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1028 -; TF-FIXEDLEN-NEXT: br i1 [[TMP3]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] -; TF-FIXEDLEN: [[MIDDLE_BLOCK]]: -; TF-FIXEDLEN-NEXT: br label %[[FOR_END:.*]] -; TF-FIXEDLEN: [[SCALAR_PH]]: -; TF-FIXEDLEN-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ] -; TF-FIXEDLEN-NEXT: br label %[[FOR_BODY:.*]] -; TF-FIXEDLEN: [[FOR_BODY]]: -; TF-FIXEDLEN-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ] -; TF-FIXEDLEN-NEXT: store i64 [[V]], ptr [[B]], align 1 -; TF-FIXEDLEN-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] -; TF-FIXEDLEN-NEXT: store i64 [[V]], ptr [[ARRAYIDX]], align 8 -; TF-FIXEDLEN-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; TF-FIXEDLEN-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1025 -; TF-FIXEDLEN-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]] -; TF-FIXEDLEN: [[FOR_END]]: -; TF-FIXEDLEN-NEXT: ret void -; entry: br label %for.body diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-interleave.ll b/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-interleave.ll index fe6a693..acfcf90 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-interleave.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-interleave.ll @@ -16,48 +16,39 @@ define void @interleave(ptr noalias %a, ptr noalias %b, i64 %N) { ; IF-EVL-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; IF-EVL: vector.ph: ; IF-EVL-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; IF-EVL-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 8 +; IF-EVL-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4 ; IF-EVL-NEXT: [[TMP6:%.*]] = sub i64 [[TMP5]], 1 ; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N:%.*]], [[TMP6]] ; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]] ; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] -; IF-EVL-NEXT: [[TRIP_COUNT_MINUS_1:%.*]] = sub i64 [[N]], 1 ; IF-EVL-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64() ; IF-EVL-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4 -; IF-EVL-NEXT: [[TMP9:%.*]] = mul i64 [[TMP8]], 2 -; IF-EVL-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TRIP_COUNT_MINUS_1]], i64 0 -; IF-EVL-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer ; 
IF-EVL-NEXT: [[TMP10:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64() ; IF-EVL-NEXT: [[TMP12:%.*]] = mul <vscale x 4 x i64> [[TMP10]], splat (i64 1) ; IF-EVL-NEXT: [[INDUCTION:%.*]] = add <vscale x 4 x i64> zeroinitializer, [[TMP12]] ; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]] ; IF-EVL: vector.body: ; IF-EVL-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] +; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ] ; IF-EVL-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i64> [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ] -; IF-EVL-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP8]], i64 0 +; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]] +; IF-EVL-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true) +; IF-EVL-NEXT: [[TMP13:%.*]] = zext i32 [[TMP11]] to i64 +; IF-EVL-NEXT: [[TMP9:%.*]] = mul i64 1, [[TMP13]] +; IF-EVL-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP9]], i64 0 ; IF-EVL-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <vscale x 4 x i64> [[BROADCAST_SPLATINSERT1]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer -; IF-EVL-NEXT: [[STEP_ADD:%.*]] = add <vscale x 4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT2]] -; IF-EVL-NEXT: [[TMP19:%.*]] = icmp ule <vscale x 4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]] -; IF-EVL-NEXT: [[TMP20:%.*]] = icmp ule <vscale x 4 x i64> [[STEP_ADD]], [[BROADCAST_SPLAT]] ; IF-EVL-NEXT: [[TMP21:%.*]] = getelementptr inbounds [2 x i32], ptr [[B:%.*]], <vscale x 4 x i64> [[VEC_IND]], i32 0 -; IF-EVL-NEXT: [[TMP22:%.*]] = getelementptr inbounds [2 x i32], ptr [[B]], <vscale x 4 x i64> [[STEP_ADD]], i32 0 -; IF-EVL-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> [[TMP21]], i32 4, <vscale x 4 x i1> [[TMP19]], <vscale x 4 x i32> poison) -; IF-EVL-NEXT: [[WIDE_MASKED_GATHER3:%.*]] = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> [[TMP22]], i32 4, <vscale x 4 x i1> [[TMP20]], <vscale x 4 x i32> poison) +; IF-EVL-NEXT: [[WIDE_MASKED_GATHER3:%.*]] = call <vscale x 4 x i32> @llvm.vp.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> align 4 [[TMP21]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]]) ; IF-EVL-NEXT: [[TMP23:%.*]] = getelementptr inbounds [2 x i32], ptr [[B]], <vscale x 4 x i64> [[VEC_IND]], i32 1 -; IF-EVL-NEXT: [[TMP24:%.*]] = getelementptr inbounds [2 x i32], ptr [[B]], <vscale x 4 x i64> [[STEP_ADD]], i32 1 -; IF-EVL-NEXT: [[WIDE_MASKED_GATHER4:%.*]] = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> [[TMP23]], i32 4, <vscale x 4 x i1> [[TMP19]], <vscale x 4 x i32> poison) -; IF-EVL-NEXT: [[WIDE_MASKED_GATHER5:%.*]] = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> [[TMP24]], i32 4, <vscale x 4 x i1> [[TMP20]], <vscale x 4 x i32> poison) -; IF-EVL-NEXT: [[TMP25:%.*]] = add nsw <vscale x 4 x i32> [[WIDE_MASKED_GATHER4]], [[WIDE_MASKED_GATHER]] +; IF-EVL-NEXT: [[WIDE_MASKED_GATHER5:%.*]] = call <vscale x 4 x i32> @llvm.vp.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> align 4 [[TMP23]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]]) ; IF-EVL-NEXT: [[TMP26:%.*]] = add nsw <vscale x 4 x i32> [[WIDE_MASKED_GATHER5]], [[WIDE_MASKED_GATHER3]] -; IF-EVL-NEXT: [[TMP27:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[INDEX]] +; 
IF-EVL-NEXT: [[TMP27:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[TMP29:%.*]] = getelementptr inbounds i32, ptr [[TMP27]], i32 0 -; IF-EVL-NEXT: [[TMP30:%.*]] = call i64 @llvm.vscale.i64() -; IF-EVL-NEXT: [[TMP31:%.*]] = mul nuw i64 [[TMP30]], 4 -; IF-EVL-NEXT: [[TMP32:%.*]] = getelementptr inbounds i32, ptr [[TMP27]], i64 [[TMP31]] -; IF-EVL-NEXT: call void @llvm.masked.store.nxv4i32.p0(<vscale x 4 x i32> [[TMP25]], ptr [[TMP29]], i32 4, <vscale x 4 x i1> [[TMP19]]) -; IF-EVL-NEXT: call void @llvm.masked.store.nxv4i32.p0(<vscale x 4 x i32> [[TMP26]], ptr [[TMP32]], i32 4, <vscale x 4 x i1> [[TMP20]]) -; IF-EVL-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP9]] -; IF-EVL-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i64> [[STEP_ADD]], [[BROADCAST_SPLAT2]] +; IF-EVL-NEXT: call void @llvm.vp.store.nxv4i32.p0(<vscale x 4 x i32> [[TMP26]], ptr align 4 [[TMP29]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]]) +; IF-EVL-NEXT: [[TMP15:%.*]] = zext i32 [[TMP11]] to i64 +; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP15]], [[EVL_BASED_IV]] +; IF-EVL-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP8]] +; IF-EVL-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT2]] ; IF-EVL-NEXT: [[TMP33:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; IF-EVL-NEXT: br i1 [[TMP33]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; IF-EVL: middle.block: @@ -76,50 +67,36 @@ define void @interleave(ptr noalias %a, ptr noalias %b, i64 %N) { ; IF-EVL-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX4]], align 4 ; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 ; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] +; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; IF-EVL: for.cond.cleanup: ; IF-EVL-NEXT: ret void ; ; NO-VP-LABEL: @interleave( ; NO-VP-NEXT: entry: ; NO-VP-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() -; NO-VP-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 8 +; NO-VP-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4 ; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N:%.*]], [[TMP1]] ; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; NO-VP: vector.ph: ; NO-VP-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() -; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8 +; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4 ; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]] ; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]] ; NO-VP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() ; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP4]], 4 -; NO-VP-NEXT: [[TMP5:%.*]] = mul i64 [[TMP8]], 2 ; NO-VP-NEXT: br label [[VECTOR_BODY:%.*]] ; NO-VP: vector.body: ; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; NO-VP-NEXT: [[TMP9:%.*]] = add i64 [[TMP8]], 0 -; NO-VP-NEXT: [[TMP10:%.*]] = mul i64 [[TMP9]], 1 -; NO-VP-NEXT: [[TMP11:%.*]] = add i64 [[INDEX]], [[TMP10]] -; NO-VP-NEXT: [[TMP12:%.*]] = getelementptr inbounds [2 x i32], ptr [[B:%.*]], i64 [[INDEX]], i32 0 -; NO-VP-NEXT: [[TMP13:%.*]] = getelementptr inbounds [2 x i32], ptr [[B]], i64 [[TMP11]], i32 0 -; NO-VP-NEXT: [[WIDE_VEC:%.*]] = load <vscale x 8 x i32>, ptr [[TMP12]], align 4 -; NO-VP-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32> } 
@llvm.vector.deinterleave2.nxv8i32(<vscale x 8 x i32> [[WIDE_VEC]])
-; NO-VP-NEXT: [[TMP15:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[STRIDED_VEC]], 0
-; NO-VP-NEXT: [[TMP16:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[STRIDED_VEC]], 1
+; NO-VP-NEXT: [[TMP13:%.*]] = getelementptr inbounds [2 x i32], ptr [[B:%.*]], i64 [[INDEX]], i32 0
 ; NO-VP-NEXT: [[WIDE_VEC1:%.*]] = load <vscale x 8 x i32>, ptr [[TMP13]], align 4
 ; NO-VP-NEXT: [[STRIDED_VEC2:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.vector.deinterleave2.nxv8i32(<vscale x 8 x i32> [[WIDE_VEC1]])
 ; NO-VP-NEXT: [[TMP18:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[STRIDED_VEC2]], 0
 ; NO-VP-NEXT: [[TMP19:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[STRIDED_VEC2]], 1
-; NO-VP-NEXT: [[TMP20:%.*]] = add nsw <vscale x 4 x i32> [[TMP16]], [[TMP15]]
 ; NO-VP-NEXT: [[TMP21:%.*]] = add nsw <vscale x 4 x i32> [[TMP19]], [[TMP18]]
 ; NO-VP-NEXT: [[TMP22:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[INDEX]]
 ; NO-VP-NEXT: [[TMP24:%.*]] = getelementptr inbounds i32, ptr [[TMP22]], i32 0
-; NO-VP-NEXT: [[TMP25:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP26:%.*]] = mul nuw i64 [[TMP25]], 4
-; NO-VP-NEXT: [[TMP27:%.*]] = getelementptr inbounds i32, ptr [[TMP22]], i64 [[TMP26]]
-; NO-VP-NEXT: store <vscale x 4 x i32> [[TMP20]], ptr [[TMP24]], align 4
-; NO-VP-NEXT: store <vscale x 4 x i32> [[TMP21]], ptr [[TMP27]], align 4
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; NO-VP-NEXT: store <vscale x 4 x i32> [[TMP21]], ptr [[TMP24]], align 4
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP8]]
 ; NO-VP-NEXT: [[TMP28:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
 ; NO-VP-NEXT: br i1 [[TMP28]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
 ; NO-VP: middle.block:
@@ -163,6 +140,5 @@ for.cond.cleanup:
 ret void
 }

-!0 = distinct !{!0, !1, !2}
-!1 = !{!"llvm.loop.interleave.count", i32 2}
-!2 = !{!"llvm.loop.vectorize.enable", i1 true}
+!0 = distinct !{!0, !1}
+!1 = !{!"llvm.loop.vectorize.enable", i1 true}
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/vplan-riscv-vector-reverse.ll b/llvm/test/Transforms/LoopVectorize/RISCV/vplan-riscv-vector-reverse.ll
new file mode 100644
index 0000000..d7c9ce4
--- /dev/null
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/vplan-riscv-vector-reverse.ll
@@ -0,0 +1,80 @@
+; This is the C++ loop being vectorized in this file with
+; vector.reverse:
+; #pragma clang loop vectorize_width(4, scalable)
+; for (int i = N-1; i >= 0; --i)
+; a[i] = b[i] + 1.0;
+
+; REQUIRES: asserts
+; RUN: opt -passes=loop-vectorize -mtriple=riscv64 -mattr=+v \
+; RUN: -debug-only=loop-vectorize -scalable-vectorization=on \
+; RUN: -disable-output < %s 2>&1 | FileCheck %s
+
+define void @vector_reverse_i64(ptr nocapture noundef writeonly %A, ptr nocapture noundef readonly %B, i32 noundef signext %n) {
+; CHECK: VPlan 'Initial VPlan for VF={vscale x 1,vscale x 2,vscale x 4},UF>=1' {
+; CHECK-NEXT: Live-in vp<[[VF:%.+]]> = VF
+; CHECK-NEXT: Live-in vp<[[VFxUF:%.+]]> = VF * UF
+; CHECK-NEXT: Live-in vp<[[VTC:%.+]]> = vector-trip-count
+; CHECK-NEXT: vp<[[OTC:%.+]]> = original trip-count
+; CHECK-EMPTY:
+; CHECK-NEXT: ir-bb<entry>:
+; CHECK-NEXT: EMIT vp<[[OTC]]> = EXPAND SCEV (1 + (-1 * (1 umin %n))<nuw><nsw> + %n)
+; CHECK-NEXT: Successor(s): scalar.ph, vector.ph
+; CHECK-EMPTY:
+; CHECK-NEXT: vector.ph:
+; CHECK-NEXT: vp<[[RESUME_IV_A:%.+]]> = DERIVED-IV ir<%n> + vp<[[VTC]]> * ir<-1>
+; CHECK-NEXT: vp<[[RESUME_IV_B:%.+]]> = DERIVED-IV ir<%n> + vp<[[VTC]]> * ir<-1>
+; CHECK-NEXT: Successor(s): vector loop
+; CHECK-EMPTY:
+; CHECK-NEXT: <x1> vector loop: {
+; CHECK-NEXT: vector.body:
+; CHECK-NEXT: EMIT vp<[[INDUCTION:%.+]]> = CANONICAL-INDUCTION ir<0>, vp<[[INDEX_NEXT:%.+]]>
+; CHECK-NEXT: vp<[[DERIVED_IV:%.+]]> = DERIVED-IV ir<%n> + vp<[[INDUCTION]]> * ir<-1>
+; CHECK-NEXT: vp<[[SCALAR_STEPS:%.+]]> = SCALAR-STEPS vp<[[DERIVED_IV]]>, ir<-1>, vp<[[VF]]>
+; CHECK-NEXT: CLONE ir<[[IDX:%.+]]> = add nsw vp<[[SCALAR_STEPS]]>, ir<-1>
+; CHECK-NEXT: CLONE ir<[[IDX_PROM:%.+]]> = zext ir<[[IDX]]>
+; CHECK-NEXT: CLONE ir<[[ARRAY_IDX_B:%.+]]> = getelementptr inbounds ir<[[B:%.+]]>, ir<[[IDX_PROM]]>
+; CHECK-NEXT: vp<[[VEC_END_PTR_B:%.+]]> = vector-end-pointer inbounds ir<[[ARRAY_IDX_B]]>, vp<[[VF]]>
+; CHECK-NEXT: WIDEN ir<[[VAL_B:%.+]]> = load vp<[[VEC_END_PTR_B]]>
+; CHECK-NEXT: WIDEN ir<[[ADD_RESULT:%.+]]> = add ir<[[VAL_B]]>, ir<1>
+; CHECK-NEXT: CLONE ir<[[ARRAY_IDX_A:%.+]]> = getelementptr inbounds ir<[[A:%.+]]>, ir<[[IDX_PROM]]>
+; CHECK-NEXT: vp<[[VEC_END_PTR_A:%.+]]> = vector-end-pointer inbounds ir<[[ARRAY_IDX_A]]>, vp<[[VF]]>
+; CHECK-NEXT: WIDEN store vp<[[VEC_END_PTR_A]]>, ir<[[ADD_RESULT]]>
+; CHECK-NEXT: EMIT vp<[[INDEX_NEXT]]> = add nuw vp<[[INDUCTION]]>, vp<[[VFxUF]]>
+; CHECK-NEXT: EMIT branch-on-count vp<[[INDEX_NEXT]]>, vp<[[VTC]]>
+; CHECK-NEXT: No successors
+; CHECK-NEXT: }
+; CHECK-NEXT: Successor(s): middle.block
+; CHECK-EMPTY:
+; CHECK-NEXT: middle.block:
+; CHECK-NEXT: EMIT vp<[[CMP:%.+]]> = icmp eq vp<[[OTC]]>, vp<[[VTC]]>
+; CHECK-NEXT: EMIT branch-on-cond vp<[[CMP]]>
+; CHECK-NEXT: Successor(s): ir-bb<for.cond.cleanup>, scalar.ph
+; CHECK-EMPTY:
+; CHECK-NEXT: ir-bb<for.cond.cleanup>:
+; CHECK-NEXT: No successors
+; CHECK-EMPTY:
+; CHECK-NEXT: scalar.ph:
+; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val> = phi [ vp<[[RESUME_IV_A]]>, middle.block ], [ ir<%n>, ir-bb<entry> ]
+; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val>.1 = phi [ vp<[[RESUME_IV_B]]>, middle.block ], [ ir<%n>, ir-bb<entry> ]
+; CHECK-NEXT: Successor(s): ir-bb<for.body>
+;
+entry:
+ br label %for.body
+
+for.body:
+ %indvars.iv = phi i32 [ %n, %entry ], [ %indvars.iv.next, %for.body ]
+ %i.0.in8 = phi i32 [ %n, %entry ], [ %i.0, %for.body ]
+ %i.0 = add nsw i32 %i.0.in8, -1
+ %idxprom = zext i32 %i.0 to i64
+ %arrayidx = getelementptr inbounds i32, ptr %B, i64 %idxprom
+ %1 = load i32, ptr %arrayidx, align 4
+ %add9 = add i32 %1, 1
+ %arrayidx3 = getelementptr inbounds i32, ptr %A, i64 %idxprom
+ store i32 %add9, ptr %arrayidx3, align 4
+ %cmp = icmp ugt i32 %indvars.iv, 1
+ %indvars.iv.next = add nsw i32 %indvars.iv, -1
+ br i1 %cmp, label %for.body, label %for.cond.cleanup
+
+for.cond.cleanup:
+ ret void
+}
diff --git a/llvm/test/Transforms/MemProfContextDisambiguation/basic.ll b/llvm/test/Transforms/MemProfContextDisambiguation/basic.ll
index 323df12..1784c2f 100644
--- a/llvm/test/Transforms/MemProfContextDisambiguation/basic.ll
+++ b/llvm/test/Transforms/MemProfContextDisambiguation/basic.ll
@@ -121,13 +121,14 @@ attributes #6 = { builtin }
 !12 = !{i64 789, i64 300}
 !13 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus_14, file: !14, producer: "clang version 21.0.0git (git@github.com:llvm/llvm-project.git e391301e0e4d9183fe06e69602e87b0bc889aeda)", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, splitDebugInlining: false, nameTableKind: None)
 !14 = !DIFile(filename: "basic.cc", directory: "", checksumkind: CSK_MD5, checksum: 
"8636c46e81402013b9d54e8307d2f149") -!15 = distinct !DISubprogram(name: "bar", linkageName: "_Z3barv", scope: !14, file: !14, line: 1, type: !16, scopeLine: 1, flags: DIFlagPrototyped, spFlags: DISPFlagDefinition, unit: !13) +!15 = distinct !DISubprogram(name: "bar", linkageName: "_Z3barv", scope: !14, file: !14, line: 1, type: !16, scopeLine: 1, flags: DIFlagPrototyped, spFlags: DISPFlagDefinition, unit: !13, declaration: !22) !16 = !DISubroutineType(types: !17) !17 = !{!18} !18 = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: !19, size: 64) !19 = !DIBasicType(name: "char", size: 8, encoding: DW_ATE_signed_char) !20 = !{i32 7, !"Dwarf Version", i32 5} !21 = !{i32 2, !"Debug Info Version", i32 3} +!22 = !DISubprogram(name: "bar", linkageName: "_Z3barv", scope: !14, file: !14, line: 1, type: !16, scopeLine: 1, flags: DIFlagPrototyped, spFlags: DISPFlagOptimized) ; DUMP: CCG before cloning: ; DUMP: Callsite Context Graph: @@ -290,7 +291,8 @@ attributes #6 = { builtin } ; IR: attributes #[[NOTCOLD]] = { builtin "memprof"="notcold" } ; IR: attributes #[[COLD]] = { builtin "memprof"="cold" } ;; Make sure the clone's linkageName was updated. -; IR: ![[SP]] = distinct !DISubprogram(name: "bar", linkageName: "_Z3barv.memprof.1" +; IR: ![[SP]] = distinct !DISubprogram(name: "bar", linkageName: "_Z3barv.memprof.1", {{.*}} declaration: ![[SP2:[0-9]+]]) +; IR: ![[SP2]] = !DISubprogram(name: "bar", linkageName: "_Z3barv.memprof.1" ; STATS: 1 memprof-context-disambiguation - Number of cold static allocations (possibly cloned) diff --git a/llvm/test/Transforms/SLPVectorizer/X86/buildvector-schedule-for-subvector.ll b/llvm/test/Transforms/SLPVectorizer/X86/buildvector-schedule-for-subvector.ll index 7408ba1..07fdc9d 100644 --- a/llvm/test/Transforms/SLPVectorizer/X86/buildvector-schedule-for-subvector.ll +++ b/llvm/test/Transforms/SLPVectorizer/X86/buildvector-schedule-for-subvector.ll @@ -4,6 +4,9 @@ define void @test() { ; CHECK-LABEL: define void @test() { ; CHECK-NEXT: [[BB:.*:]] +; CHECK-NEXT: [[ADD:%.*]] = add i32 1, 0 +; CHECK-NEXT: [[TMP0:%.*]] = insertelement <4 x i32> <i32 0, i32 0, i32 0, i32 poison>, i32 [[ADD]], i32 3 +; CHECK-NEXT: [[TMP1:%.*]] = icmp ult <4 x i32> [[TMP0]], zeroinitializer ; CHECK-NEXT: [[ICMP:%.*]] = icmp samesign ult i32 0, 0 ; CHECK-NEXT: [[SELECT:%.*]] = select i1 [[ICMP]], i32 0, i32 0 ; CHECK-NEXT: [[ZEXT:%.*]] = zext i32 [[SELECT]] to i64 @@ -14,7 +17,8 @@ define void @test() { ; CHECK-NEXT: [[TMP2:%.*]] = insertelement <4 x i32> <i32 0, i32 0, i32 0, i32 poison>, i32 [[CALL]], i32 3 ; CHECK-NEXT: [[TMP3:%.*]] = icmp eq <4 x i32> [[TMP2]], zeroinitializer ; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <4 x i1> [[TMP3]], <4 x i1> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison> -; CHECK-NEXT: [[TMP5:%.*]] = shufflevector <8 x i1> [[TMP4]], <8 x i1> <i1 false, i1 false, i1 false, i1 false, i1 undef, i1 undef, i1 undef, i1 undef>, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 10, i32 11> +; CHECK-NEXT: [[TMP5:%.*]] = shufflevector <4 x i1> [[TMP1]], <4 x i1> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison> +; CHECK-NEXT: [[TMP6:%.*]] = shufflevector <8 x i1> [[TMP4]], <8 x i1> [[TMP5]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 10, i32 11> ; CHECK-NEXT: ret void ; bb: diff --git a/llvm/test/Transforms/SLPVectorizer/X86/full-match-with-poison-scalar.ll b/llvm/test/Transforms/SLPVectorizer/X86/full-match-with-poison-scalar.ll index 5e3d471..15ba98f 100644 
--- a/llvm/test/Transforms/SLPVectorizer/X86/full-match-with-poison-scalar.ll +++ b/llvm/test/Transforms/SLPVectorizer/X86/full-match-with-poison-scalar.ll @@ -7,10 +7,17 @@ define i32 @test() { ; CHECK-NEXT: br label %[[FUNC_135_EXIT_I:.*]] ; CHECK: [[FUNC_135_EXIT_I]]: ; CHECK-NEXT: [[G_228_PROMOTED166_I1105_I:%.*]] = phi i32 [ 0, %[[ENTRY]] ] -; CHECK-NEXT: [[TMP0:%.*]] = insertelement <8 x i32> <i32 poison, i32 poison, i32 poison, i32 poison, i32 0, i32 poison, i32 poison, i32 poison>, i32 [[G_228_PROMOTED166_I1105_I]], i32 0 -; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x i32> [[TMP0]], <8 x i32> poison, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 4, i32 poison, i32 poison, i32 poison> -; CHECK-NEXT: [[TMP2:%.*]] = add <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 poison, i32 poison, i32 poison>, [[TMP1]] -; CHECK-NEXT: [[TMP11:%.*]] = shufflevector <8 x i32> [[TMP2]], <8 x i32> poison, <16 x i32> <i32 0, i32 0, i32 0, i32 1, i32 1, i32 1, i32 1, i32 2, i32 2, i32 2, i32 2, i32 3, i32 3, i32 3, i32 3, i32 4> +; CHECK-NEXT: [[TMP0:%.*]] = insertelement <4 x i32> <i32 poison, i32 0, i32 poison, i32 poison>, i32 [[G_228_PROMOTED166_I1105_I]], i32 0 +; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <4 x i32> [[TMP0]], <4 x i32> poison, <4 x i32> <i32 0, i32 0, i32 0, i32 1> +; CHECK-NEXT: [[TMP2:%.*]] = add <4 x i32> zeroinitializer, [[TMP1]] +; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <4 x i32> [[TMP2]], <4 x i32> poison, <12 x i32> <i32 0, i32 0, i32 0, i32 1, i32 1, i32 1, i32 1, i32 2, i32 2, i32 2, i32 2, i32 3> +; CHECK-NEXT: [[TMP7:%.*]] = shufflevector <4 x i32> [[TMP0]], <4 x i32> poison, <16 x i32> <i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 0, i32 0, i32 0, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison> +; CHECK-NEXT: [[TMP5:%.*]] = insertelement <16 x i32> poison, i32 [[G_228_PROMOTED166_I1105_I]], i32 0 +; CHECK-NEXT: [[TMP9:%.*]] = shufflevector <16 x i32> [[TMP5]], <16 x i32> poison, <16 x i32> <i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 0, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison> +; CHECK-NEXT: [[TMP8:%.*]] = shufflevector <16 x i32> [[TMP7]], <16 x i32> [[TMP9]], <16 x i32> <i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 23, i32 8, i32 9, i32 10, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison> +; CHECK-NEXT: [[TMP10:%.*]] = shufflevector <12 x i32> [[TMP3]], <12 x i32> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 poison, i32 poison, i32 poison, i32 poison> +; CHECK-NEXT: [[TMP17:%.*]] = shufflevector <4 x i32> [[TMP2]], <4 x i32> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison> +; CHECK-NEXT: [[TMP11:%.*]] = shufflevector <16 x i32> [[TMP17]], <16 x i32> [[TMP8]], <16 x i32> <i32 0, i32 0, i32 0, i32 1, i32 1, i32 1, i32 1, i32 23, i32 24, i32 25, i32 26, i32 2, i32 2, i32 2, i32 2, i32 3> ; CHECK-NEXT: [[TMP12:%.*]] = icmp ugt <16 x i32> [[TMP11]], zeroinitializer ; CHECK-NEXT: [[TMP13:%.*]] = icmp ult <16 x i32> [[TMP11]], zeroinitializer ; CHECK-NEXT: [[TMP14:%.*]] = shufflevector <16 x i1> [[TMP12]], <16 x i1> [[TMP13]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 
31> diff --git a/llvm/test/Transforms/SLPVectorizer/X86/node-outside-used-only.ll b/llvm/test/Transforms/SLPVectorizer/X86/node-outside-used-only.ll index 03d76ef..1c482e0 100644 --- a/llvm/test/Transforms/SLPVectorizer/X86/node-outside-used-only.ll +++ b/llvm/test/Transforms/SLPVectorizer/X86/node-outside-used-only.ll @@ -4,10 +4,11 @@ define i64 @test() { ; CHECK-LABEL: define i64 @test() { ; CHECK-NEXT: [[BB:.*]]: +; CHECK-NEXT: [[TMP0:%.*]] = insertelement <2 x i32> <i32 0, i32 poison>, i32 0, i32 1 ; CHECK-NEXT: br label %[[BB1:.*]] ; CHECK: [[BB1]]: ; CHECK-NEXT: [[TMP1:%.*]] = phi <2 x i32> [ zeroinitializer, %[[BB]] ], [ [[TMP4:%.*]], %[[BB5:.*]] ] -; CHECK-NEXT: [[TMP2:%.*]] = or <2 x i32> zeroinitializer, [[TMP1]] +; CHECK-NEXT: [[TMP2:%.*]] = or <2 x i32> [[TMP0]], [[TMP1]] ; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <2 x i32> [[TMP1]], <2 x i32> [[TMP2]], <2 x i32> <i32 0, i32 3> ; CHECK-NEXT: [[TMP4]] = or <2 x i32> [[TMP3]], zeroinitializer ; CHECK-NEXT: br label %[[BB5]] diff --git a/llvm/test/Transforms/SLPVectorizer/X86/non-schedulable-instructions-become-schedulable.ll b/llvm/test/Transforms/SLPVectorizer/X86/non-schedulable-instructions-become-schedulable.ll index 6bb52e0..652abef 100644 --- a/llvm/test/Transforms/SLPVectorizer/X86/non-schedulable-instructions-become-schedulable.ll +++ b/llvm/test/Transforms/SLPVectorizer/X86/non-schedulable-instructions-become-schedulable.ll @@ -7,17 +7,19 @@ define void @test() { ; CHECK-NEXT: [[ENTRY:.*:]] ; CHECK-NEXT: br label %[[BB1:.*]] ; CHECK: [[IF_THEN_I_I:.*]]: -; CHECK-NEXT: br label %[[BB3:.*]] +; CHECK-NEXT: br label %[[BB5:.*]] ; CHECK: [[BB1]]: ; CHECK-NEXT: [[TMP0:%.*]] = zext i1 false to i64 -; CHECK-NEXT: [[TMP1:%.*]] = insertelement <4 x i64> <i64 0, i64 0, i64 poison, i64 0>, i64 [[TMP0]], i32 2 -; CHECK-NEXT: [[TMP2:%.*]] = add <4 x i64> zeroinitializer, [[TMP1]] -; CHECK-NEXT: br i1 false, label %[[BB3]], label %[[BB2:.*]] -; CHECK: [[BB3]]: -; CHECK-NEXT: [[TMP4:%.*]] = phi <4 x i64> [ [[TMP2]], %[[BB1]] ], [ poison, %[[IF_THEN_I_I]] ] +; CHECK-NEXT: [[TMP1:%.*]] = insertelement <2 x i64> <i64 poison, i64 0>, i64 [[TMP0]], i32 0 +; CHECK-NEXT: [[TMP2:%.*]] = add <2 x i64> zeroinitializer, [[TMP1]] +; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <2 x i64> [[TMP2]], <2 x i64> poison, <4 x i32> <i32 0, i32 1, i32 poison, i32 poison> +; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <4 x i64> <i64 0, i64 0, i64 poison, i64 poison>, <4 x i64> [[TMP3]], <4 x i32> <i32 0, i32 1, i32 4, i32 5> +; CHECK-NEXT: br i1 false, label %[[BB5]], label %[[BB2:.*]] +; CHECK: [[BB5]]: +; CHECK-NEXT: [[TMP6:%.*]] = phi <4 x i64> [ [[TMP4]], %[[BB1]] ], [ poison, %[[IF_THEN_I_I]] ] ; CHECK-NEXT: br label %[[BB2]] ; CHECK: [[BB2]]: -; CHECK-NEXT: [[TMP7:%.*]] = phi <4 x i64> [ [[TMP4]], %[[BB3]] ], [ [[TMP2]], %[[BB1]] ] +; CHECK-NEXT: [[TMP7:%.*]] = phi <4 x i64> [ [[TMP6]], %[[BB5]] ], [ [[TMP4]], %[[BB1]] ] ; CHECK-NEXT: store <4 x i64> [[TMP7]], ptr getelementptr inbounds nuw (i8, ptr null, i64 40), align 8 ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/SLPVectorizer/X86/pr47642.ll b/llvm/test/Transforms/SLPVectorizer/X86/pr47642.ll index 782aada..a4949bc 100644 --- a/llvm/test/Transforms/SLPVectorizer/X86/pr47642.ll +++ b/llvm/test/Transforms/SLPVectorizer/X86/pr47642.ll @@ -6,9 +6,14 @@ target triple = "x86_64-unknown-linux-gnu" define <4 x i32> @foo(<4 x i32> %x, i32 %f) { ; CHECK-LABEL: @foo( -; CHECK-NEXT: [[VECINIT:%.*]] = insertelement <4 x i32> poison, i32 [[F:%.*]], i32 0 -; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <4 x 
i32> [[VECINIT]], <4 x i32> poison, <4 x i32> zeroinitializer -; CHECK-NEXT: [[VECINIT51:%.*]] = add <4 x i32> [[TMP2]], <i32 0, i32 1, i32 2, i32 3> +; CHECK-NEXT: [[VECINIT:%.*]] = insertelement <4 x i32> undef, i32 [[F:%.*]], i32 0 +; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[F]], 1 +; CHECK-NEXT: [[VECINIT1:%.*]] = insertelement <4 x i32> [[VECINIT]], i32 [[ADD]], i32 1 +; CHECK-NEXT: [[TMP1:%.*]] = insertelement <2 x i32> poison, i32 [[F]], i32 0 +; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <2 x i32> [[TMP1]], <2 x i32> poison, <2 x i32> zeroinitializer +; CHECK-NEXT: [[TMP3:%.*]] = add nsw <2 x i32> [[TMP2]], <i32 2, i32 3> +; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <2 x i32> [[TMP3]], <2 x i32> poison, <4 x i32> <i32 0, i32 1, i32 poison, i32 poison> +; CHECK-NEXT: [[VECINIT51:%.*]] = shufflevector <4 x i32> [[VECINIT1]], <4 x i32> [[TMP4]], <4 x i32> <i32 0, i32 1, i32 4, i32 5> ; CHECK-NEXT: ret <4 x i32> [[VECINIT51]] ; %vecinit = insertelement <4 x i32> undef, i32 %f, i32 0 diff --git a/llvm/test/Transforms/SLPVectorizer/alternate-non-profitable.ll b/llvm/test/Transforms/SLPVectorizer/alternate-non-profitable.ll index 125c2dc..ad4daea 100644 --- a/llvm/test/Transforms/SLPVectorizer/alternate-non-profitable.ll +++ b/llvm/test/Transforms/SLPVectorizer/alternate-non-profitable.ll @@ -150,9 +150,9 @@ define <2 x i32> @replace_through_int_casts_ele0_only(i16 %inp, <2 x i16> %dead) define <2 x i8> @replace_through_binop_fail_cant_speculate(i8 %inp, <2 x i8> %d, <2 x i8> %any) { ; CHECK-LABEL: define <2 x i8> @replace_through_binop_fail_cant_speculate( ; CHECK-SAME: i8 [[INP:%.*]], <2 x i8> [[D:%.*]], <2 x i8> [[ANY:%.*]]) { -; CHECK-NEXT: [[TMP3:%.*]] = insertelement <2 x i8> poison, i8 [[INP]], i32 0 -; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <2 x i8> [[TMP3]], <2 x i8> poison, <2 x i32> zeroinitializer -; CHECK-NEXT: [[V:%.*]] = add <2 x i8> [[TMP2]], <i8 0, i8 5> +; CHECK-NEXT: [[ADD:%.*]] = add i8 [[INP]], 5 +; CHECK-NEXT: [[V0:%.*]] = insertelement <2 x i8> poison, i8 [[INP]], i64 0 +; CHECK-NEXT: [[V:%.*]] = insertelement <2 x i8> [[V0]], i8 [[ADD]], i64 1 ; CHECK-NEXT: [[DIV0:%.*]] = sdiv <2 x i8> splat (i8 -128), [[V]] ; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[INP]], 123 ; CHECK-NEXT: [[R:%.*]] = insertelement <2 x i8> [[DIV0]], i8 [[TMP1]], i64 0 diff --git a/llvm/test/Transforms/Scalarizer/extractvalue-struct-of-vectors.ll b/llvm/test/Transforms/Scalarizer/extractvalue-struct-of-vectors.ll new file mode 100644 index 0000000..b8d1b92 --- /dev/null +++ b/llvm/test/Transforms/Scalarizer/extractvalue-struct-of-vectors.ll @@ -0,0 +1,23 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5 +; RUN: opt -passes='function(scalarizer)' -S < %s | FileCheck %s + +define void @func(<2 x i32> noundef %a, <2 x i32> noundef %b) { +; CHECK-LABEL: define void @func( +; CHECK-SAME: <2 x i32> noundef [[A:%.*]], <2 x i32> noundef [[B:%.*]]) { +; CHECK-NEXT: [[A_I0:%.*]] = extractelement <2 x i32> [[A]], i64 0 +; CHECK-NEXT: [[B_I0:%.*]] = extractelement <2 x i32> [[B]], i64 0 +; CHECK-NEXT: [[UADDC_I0:%.*]] = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 [[A_I0]], i32 [[B_I0]]) +; CHECK-NEXT: [[A_I1:%.*]] = extractelement <2 x i32> [[A]], i64 1 +; CHECK-NEXT: [[B_I1:%.*]] = extractelement <2 x i32> [[B]], i64 1 +; CHECK-NEXT: [[UADDC_I1:%.*]] = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 [[A_I1]], i32 [[B_I1]]) +; CHECK-NEXT: [[CARRY_ELEM1:%.*]] = extractvalue { i32, i1 } [[UADDC_I0]], 1 +; CHECK-NEXT: [[CARRY_ELEM11:%.*]] = extractvalue { i32, 
i1 } [[UADDC_I1]], 1 +; CHECK-NEXT: [[CARRY_ZEXT_I0:%.*]] = zext i1 [[CARRY_ELEM1]] to i32 +; CHECK-NEXT: [[CARRY_ZEXT_I1:%.*]] = zext i1 [[CARRY_ELEM11]] to i32 +; CHECK-NEXT: ret void +; + %uaddc = call { <2 x i32>, <2 x i1> } @llvm.uadd.with.overflow.v2i32(<2 x i32> %a, <2 x i32> %b) + %carry = extractvalue { <2 x i32>, <2 x i1> } %uaddc, 1 + %carry_zext = zext <2 x i1> %carry to <2 x i32> + ret void +} diff --git a/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/mips64_eh.ll.expected b/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/mips64_eh.ll.expected index 897209a..56058bb 100644 --- a/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/mips64_eh.ll.expected +++ b/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/mips64_eh.ll.expected @@ -8,17 +8,17 @@ define i32 @main() personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: sd $ra, 8($sp) # 8-byte Folded Spill ; CHECK-NEXT: .cfi_offset 31, -8 -; CHECK-NEXT: .Ltmp0: +; CHECK-NEXT: .Ltmp0: # EH_LABEL ; CHECK-NEXT: jal foo ; CHECK-NEXT: nop -; CHECK-NEXT: .Ltmp1: +; CHECK-NEXT: .Ltmp1: # EH_LABEL ; CHECK-NEXT: # %bb.1: # %good ; CHECK-NEXT: addiu $2, $zero, 5 ; CHECK-NEXT: ld $ra, 8($sp) # 8-byte Folded Reload ; CHECK-NEXT: jr $ra ; CHECK-NEXT: daddiu $sp, $sp, 16 ; CHECK-NEXT: .LBB0_2: # %bad -; CHECK-NEXT: .Ltmp2: +; CHECK-NEXT: .Ltmp2: # EH_LABEL ; CHECK-NEXT: jal _Unwind_Resume ; CHECK-NEXT: nop %1 = invoke i32 @foo() to label %good unwind label %bad diff --git a/llvm/test/tools/dxil-dis/lifetimes.ll b/llvm/test/tools/dxil-dis/lifetimes.ll deleted file mode 100644 index cb3e629..0000000 --- a/llvm/test/tools/dxil-dis/lifetimes.ll +++ /dev/null @@ -1,38 +0,0 @@ -; RUN: llc --filetype=obj %s -o - | dxil-dis -o - | FileCheck %s -target triple = "dxil-unknown-shadermodel6.7-library" - -define void @test_lifetimes() { -; CHECK-LABEL: test_lifetimes -; CHECK-NEXT: [[ALLOCA:%.*]] = alloca [2 x i32], align 4 -; CHECK-NEXT: [[GEP:%.*]] = getelementptr [2 x i32], [2 x i32]* [[ALLOCA]], i32 0, i32 0 -; CHECK-NEXT: [[BITCAST:%.*]] = bitcast [2 x i32]* [[ALLOCA]] to i8* -; CHECK-NEXT: call void @llvm.lifetime.start(i64 4, i8* nonnull [[BITCAST]]) -; CHECK-NEXT: store i32 0, i32* [[GEP]], align 4 -; CHECK-NEXT: [[BITCAST:%.*]] = bitcast [2 x i32]* [[ALLOCA]] to i8* -; CHECK-NEXT: call void @llvm.lifetime.end(i64 4, i8* nonnull [[BITCAST]]) -; CHECK-NEXT: ret void -; - %a = alloca [2 x i32], align 4 - %gep = getelementptr [2 x i32], ptr %a, i32 0, i32 0 - call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %a) - store i32 0, ptr %gep, align 4 - call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %a) - ret void -} - -; CHECK-DAG: attributes [[LIFETIME_ATTRS:#.*]] = { nounwind } - -; CHECK-DAG: ; Function Attrs: nounwind -; CHECK-DAG: declare void @llvm.lifetime.start(i64, i8* nocapture) [[LIFETIME_ATTRS]] - -; CHECK-DAG: ; Function Attrs: nounwind -; CHECK-DAG: declare void @llvm.lifetime.end(i64, i8* nocapture) [[LIFETIME_ATTRS]] - -; Function Attrs: nounwind memory(argmem: readwrite) -declare void @llvm.lifetime.end.p0(i64, ptr) #0 - -; Function Attrs: nounwind memory(argmem: readwrite) -declare void @llvm.lifetime.start.p0(i64, ptr) #0 - -attributes #0 = { nounwind memory(argmem: readwrite) } - diff --git a/llvm/tools/llvm-readobj/ELFDumper.cpp b/llvm/tools/llvm-readobj/ELFDumper.cpp index 2867d48..2699e10 100644 --- a/llvm/tools/llvm-readobj/ELFDumper.cpp +++ b/llvm/tools/llvm-readobj/ELFDumper.cpp @@ -1087,26 +1087,25 @@ const 
EnumEntry<unsigned> ElfObjectFileType[] = { }; const EnumEntry<unsigned> ElfOSABI[] = { - {"SystemV", "UNIX - System V", ELF::ELFOSABI_NONE}, - {"HPUX", "UNIX - HP-UX", ELF::ELFOSABI_HPUX}, - {"NetBSD", "UNIX - NetBSD", ELF::ELFOSABI_NETBSD}, - {"GNU/Linux", "UNIX - GNU", ELF::ELFOSABI_LINUX}, - {"GNU/Hurd", "GNU/Hurd", ELF::ELFOSABI_HURD}, - {"Solaris", "UNIX - Solaris", ELF::ELFOSABI_SOLARIS}, - {"AIX", "UNIX - AIX", ELF::ELFOSABI_AIX}, - {"IRIX", "UNIX - IRIX", ELF::ELFOSABI_IRIX}, - {"FreeBSD", "UNIX - FreeBSD", ELF::ELFOSABI_FREEBSD}, - {"TRU64", "UNIX - TRU64", ELF::ELFOSABI_TRU64}, - {"Modesto", "Novell - Modesto", ELF::ELFOSABI_MODESTO}, - {"OpenBSD", "UNIX - OpenBSD", ELF::ELFOSABI_OPENBSD}, - {"OpenVMS", "VMS - OpenVMS", ELF::ELFOSABI_OPENVMS}, - {"NSK", "HP - Non-Stop Kernel", ELF::ELFOSABI_NSK}, - {"AROS", "AROS", ELF::ELFOSABI_AROS}, - {"FenixOS", "FenixOS", ELF::ELFOSABI_FENIXOS}, - {"CloudABI", "CloudABI", ELF::ELFOSABI_CLOUDABI}, - {"CUDA", "NVIDIA - CUDA", ELF::ELFOSABI_CUDA}, - {"Standalone", "Standalone App", ELF::ELFOSABI_STANDALONE} -}; + {"SystemV", "UNIX - System V", ELF::ELFOSABI_NONE}, + {"HPUX", "UNIX - HP-UX", ELF::ELFOSABI_HPUX}, + {"NetBSD", "UNIX - NetBSD", ELF::ELFOSABI_NETBSD}, + {"GNU/Linux", "UNIX - GNU", ELF::ELFOSABI_LINUX}, + {"GNU/Hurd", "GNU/Hurd", ELF::ELFOSABI_HURD}, + {"Solaris", "UNIX - Solaris", ELF::ELFOSABI_SOLARIS}, + {"AIX", "UNIX - AIX", ELF::ELFOSABI_AIX}, + {"IRIX", "UNIX - IRIX", ELF::ELFOSABI_IRIX}, + {"FreeBSD", "UNIX - FreeBSD", ELF::ELFOSABI_FREEBSD}, + {"TRU64", "UNIX - TRU64", ELF::ELFOSABI_TRU64}, + {"Modesto", "Novell - Modesto", ELF::ELFOSABI_MODESTO}, + {"OpenBSD", "UNIX - OpenBSD", ELF::ELFOSABI_OPENBSD}, + {"OpenVMS", "VMS - OpenVMS", ELF::ELFOSABI_OPENVMS}, + {"NSK", "HP - Non-Stop Kernel", ELF::ELFOSABI_NSK}, + {"AROS", "AROS", ELF::ELFOSABI_AROS}, + {"FenixOS", "FenixOS", ELF::ELFOSABI_FENIXOS}, + {"CloudABI", "CloudABI", ELF::ELFOSABI_CLOUDABI}, + {"CUDA", "NVIDIA - CUDA", ELF::ELFOSABI_CUDA}, + {"Standalone", "Standalone App", ELF::ELFOSABI_STANDALONE}}; const EnumEntry<unsigned> AMDGPUElfOSABI[] = { {"AMDGPU_HSA", "AMDGPU - HSA", ELF::ELFOSABI_AMDGPU_HSA}, @@ -1671,16 +1670,17 @@ const EnumEntry<unsigned> ElfHeaderAMDGPUFlagsABIVersion4[] = { }; const EnumEntry<unsigned> ElfHeaderNVPTXFlags[] = { - ENUM_ENT(EF_CUDA_SM20, "sm_20"), ENUM_ENT(EF_CUDA_SM21, "sm_21"), - ENUM_ENT(EF_CUDA_SM30, "sm_30"), ENUM_ENT(EF_CUDA_SM32, "sm_32"), - ENUM_ENT(EF_CUDA_SM35, "sm_35"), ENUM_ENT(EF_CUDA_SM37, "sm_37"), - ENUM_ENT(EF_CUDA_SM50, "sm_50"), ENUM_ENT(EF_CUDA_SM52, "sm_52"), - ENUM_ENT(EF_CUDA_SM53, "sm_53"), ENUM_ENT(EF_CUDA_SM60, "sm_60"), - ENUM_ENT(EF_CUDA_SM61, "sm_61"), ENUM_ENT(EF_CUDA_SM62, "sm_62"), - ENUM_ENT(EF_CUDA_SM70, "sm_70"), ENUM_ENT(EF_CUDA_SM72, "sm_72"), - ENUM_ENT(EF_CUDA_SM75, "sm_75"), ENUM_ENT(EF_CUDA_SM80, "sm_80"), - ENUM_ENT(EF_CUDA_SM86, "sm_86"), ENUM_ENT(EF_CUDA_SM87, "sm_87"), - ENUM_ENT(EF_CUDA_SM89, "sm_89"), ENUM_ENT(EF_CUDA_SM90, "sm_90"), + ENUM_ENT(EF_CUDA_SM20, "sm_20"), ENUM_ENT(EF_CUDA_SM21, "sm_21"), + ENUM_ENT(EF_CUDA_SM30, "sm_30"), ENUM_ENT(EF_CUDA_SM32, "sm_32"), + ENUM_ENT(EF_CUDA_SM35, "sm_35"), ENUM_ENT(EF_CUDA_SM37, "sm_37"), + ENUM_ENT(EF_CUDA_SM50, "sm_50"), ENUM_ENT(EF_CUDA_SM52, "sm_52"), + ENUM_ENT(EF_CUDA_SM53, "sm_53"), ENUM_ENT(EF_CUDA_SM60, "sm_60"), + ENUM_ENT(EF_CUDA_SM61, "sm_61"), ENUM_ENT(EF_CUDA_SM62, "sm_62"), + ENUM_ENT(EF_CUDA_SM70, "sm_70"), ENUM_ENT(EF_CUDA_SM72, "sm_72"), + ENUM_ENT(EF_CUDA_SM75, "sm_75"), ENUM_ENT(EF_CUDA_SM80, "sm_80"), + ENUM_ENT(EF_CUDA_SM86, 
"sm_86"), ENUM_ENT(EF_CUDA_SM87, "sm_87"), + ENUM_ENT(EF_CUDA_SM89, "sm_89"), ENUM_ENT(EF_CUDA_SM90, "sm_90"), + ENUM_ENT(EF_CUDA_SM100, "sm_100"), ENUM_ENT(EF_CUDA_SM120, "sm_120"), }; const EnumEntry<unsigned> ElfHeaderRISCVFlags[] = { @@ -3655,10 +3655,16 @@ template <class ELFT> void GNUELFDumper<ELFT>::printFileHeaders() { else if (e.e_machine == EM_XTENSA) ElfFlags = printFlags(e.e_flags, ArrayRef(ElfHeaderXtensaFlags), unsigned(ELF::EF_XTENSA_MACH)); - else if (e.e_machine == EM_CUDA) + else if (e.e_machine == EM_CUDA) { ElfFlags = printFlags(e.e_flags, ArrayRef(ElfHeaderNVPTXFlags), unsigned(ELF::EF_CUDA_SM)); - else if (e.e_machine == EM_AMDGPU) { + if (e.e_ident[ELF::EI_ABIVERSION] == ELF::ELFABIVERSION_CUDA_V1 && + (e.e_flags & ELF::EF_CUDA_ACCELERATORS_V1)) + ElfFlags += "a"; + else if (e.e_ident[ELF::EI_ABIVERSION] == ELF::ELFABIVERSION_CUDA_V2 && + (e.e_flags & ELF::EF_CUDA_ACCELERATORS)) + ElfFlags += "a"; + } else if (e.e_machine == EM_AMDGPU) { switch (e.e_ident[ELF::EI_ABIVERSION]) { default: break; diff --git a/llvm/unittests/Frontend/OpenMPDecompositionTest.cpp b/llvm/unittests/Frontend/OpenMPDecompositionTest.cpp index 6189d09..95c26b1 100644 --- a/llvm/unittests/Frontend/OpenMPDecompositionTest.cpp +++ b/llvm/unittests/Frontend/OpenMPDecompositionTest.cpp @@ -431,8 +431,8 @@ TEST_F(OpenMPDecompositionTest, Firstprivate3) { std::string Dir0 = stringify(Dec.output[0]); std::string Dir1 = stringify(Dec.output[1]); std::string Dir2 = stringify(Dec.output[2]); - ASSERT_EQ(Dir0, "target map(2, , , , (x))"); // (12), (27) - ASSERT_EQ(Dir1, "teams shared(x)"); // (6), (17) + ASSERT_EQ(Dir0, "target map(2, , , , , (x))"); // (12), (27) + ASSERT_EQ(Dir1, "teams shared(x)"); // (6), (17) ASSERT_EQ(Dir2, "distribute firstprivate(x) lastprivate(, (x))"); // (5), (21) } @@ -574,9 +574,9 @@ TEST_F(OpenMPDecompositionTest, Lastprivate3) { std::string Dir0 = stringify(Dec.output[0]); std::string Dir1 = stringify(Dec.output[1]); std::string Dir2 = stringify(Dec.output[2]); - ASSERT_EQ(Dir0, "target map(2, , , , (x))"); // (21), (27) - ASSERT_EQ(Dir1, "parallel shared(x)"); // (22) - ASSERT_EQ(Dir2, "do lastprivate(, (x))"); // (21) + ASSERT_EQ(Dir0, "target map(2, , , , , (x))"); // (21), (27) + ASSERT_EQ(Dir1, "parallel shared(x)"); // (22) + ASSERT_EQ(Dir2, "do lastprivate(, (x))"); // (21) } // SHARED @@ -984,9 +984,9 @@ TEST_F(OpenMPDecompositionTest, Reduction7) { std::string Dir0 = stringify(Dec.output[0]); std::string Dir1 = stringify(Dec.output[1]); std::string Dir2 = stringify(Dec.output[2]); - ASSERT_EQ(Dir0, "target map(2, , , , (x))"); // (36), (10) - ASSERT_EQ(Dir1, "parallel shared(x)"); // (36), (1), (4) - ASSERT_EQ(Dir2, "do reduction(, (3), (x))"); // (36) + ASSERT_EQ(Dir0, "target map(2, , , , , (x))"); // (36), (10) + ASSERT_EQ(Dir1, "parallel shared(x)"); // (36), (1), (4) + ASSERT_EQ(Dir2, "do reduction(, (3), (x))"); // (36) } // IF diff --git a/llvm/utils/gn/build/BUILD.gn b/llvm/utils/gn/build/BUILD.gn index 9b5254e..f080a4c6 100644 --- a/llvm/utils/gn/build/BUILD.gn +++ b/llvm/utils/gn/build/BUILD.gn @@ -179,6 +179,7 @@ config("compiler_defaults") { "_HAS_EXCEPTIONS=0", "_UNICODE", "UNICODE", + "CLANG_BUILD_STATIC", ] cflags += [ "/EHs-c-" ] cflags_cc += [ "/std:c++17" ] diff --git a/llvm/utils/gn/secondary/clang/unittests/Analysis/BUILD.gn b/llvm/utils/gn/secondary/clang/unittests/Analysis/BUILD.gn index ab5dae8..ac2ce0c 100644 --- a/llvm/utils/gn/secondary/clang/unittests/Analysis/BUILD.gn +++ b/llvm/utils/gn/secondary/clang/unittests/Analysis/BUILD.gn @@ 
-8,6 +8,7 @@ unittest("ClangAnalysisTests") {
     "//clang/lib/Analysis",
     "//clang/lib/Basic",
     "//clang/lib/Frontend",
+    "//clang/lib/Testing",
     "//clang/lib/Tooling",
     "//llvm/lib/Support",
   ]
@@ -17,6 +18,7 @@ unittest("ClangAnalysisTests") {
     "CloneDetectionTest.cpp",
     "ExprMutationAnalyzerTest.cpp",
     "IntervalPartitionTest.cpp",
+    "LifetimeSafetyTest.cpp",
     "MacroExpansionContextTest.cpp",
     "UnsafeBufferUsageTest.cpp",
   ]
 }
diff --git a/llvm/utils/gn/secondary/lldb/source/Plugins/Language/CPlusPlus/BUILD.gn b/llvm/utils/gn/secondary/lldb/source/Plugins/Language/CPlusPlus/BUILD.gn
index 36d86bc..7ea5f8c 100644
--- a/llvm/utils/gn/secondary/lldb/source/Plugins/Language/CPlusPlus/BUILD.gn
+++ b/llvm/utils/gn/secondary/lldb/source/Plugins/Language/CPlusPlus/BUILD.gn
@@ -62,8 +62,10 @@ static_library("CPlusPlus") {
     "LibStdcppUniquePointer.cpp",
     "MSVCUndecoratedNameParser.cpp",
     "MsvcStl.cpp",
+    "MsvcStlAtomic.cpp",
     "MsvcStlSmartPointer.cpp",
     "MsvcStlTuple.cpp",
+    "MsvcStlUnordered.cpp",
     "MsvcStlVariant.cpp",
     "MsvcStlVector.cpp",
   ]